ioa_cfg           590 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg           593 drivers/scsi/ipr.c 	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
ioa_cfg           594 drivers/scsi/ipr.c 	trace_entry = &ioa_cfg->trace[trace_index];
ioa_cfg           598 drivers/scsi/ipr.c 	if (ipr_cmd->ioa_cfg->sis64)
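The hits at ipr.c:590-598 above show the driver's command trace: a slot in a fixed ring is claimed by bumping an atomic counter and masking it down to the ring size, so tracing never takes a lock. A minimal sketch of that indexing pattern (ring size, entry layout, and names here are illustrative, not the driver's actual definitions):

    #include <linux/atomic.h>
    #include <linux/types.h>

    #define TRACE_ENTRIES     256                /* must be a power of two */
    #define TRACE_INDEX_MASK  (TRACE_ENTRIES - 1)

    struct trace_entry {
    	u8  op_code;
    	u32 data;
    };

    struct trace_ring {
    	atomic_t index;
    	struct trace_entry slots[TRACE_ENTRIES];
    };

    /* Claim the next slot: concurrent callers each get a distinct index,
     * and the counter wraps naturally because the size is a power of two. */
    static struct trace_entry *trace_claim(struct trace_ring *ring)
    {
    	unsigned int i;

    	i = atomic_add_return(1, &ring->index) & TRACE_INDEX_MASK;
    	return &ring->slots[i];
    }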
ioa_cfg           621 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg           623 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg           625 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg           651 drivers/scsi/ipr.c 	if (ipr_cmd->ioa_cfg->sis64) {
ioa_cfg           718 drivers/scsi/ipr.c struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg           721 drivers/scsi/ipr.c 		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
ioa_cfg           737 drivers/scsi/ipr.c static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg           744 drivers/scsi/ipr.c 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg           745 drivers/scsi/ipr.c 		spin_lock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg           746 drivers/scsi/ipr.c 		ioa_cfg->hrrq[i].allow_interrupts = 0;
ioa_cfg           747 drivers/scsi/ipr.c 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg           751 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg           752 drivers/scsi/ipr.c 		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
ioa_cfg           754 drivers/scsi/ipr.c 		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
ioa_cfg           757 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg           758 drivers/scsi/ipr.c 		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
ioa_cfg           759 drivers/scsi/ipr.c 	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
ioa_cfg           760 drivers/scsi/ipr.c 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
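ipr.c:737-760 above is ipr_mask_and_clear_interrupts(): interrupt delivery is first disallowed per HRRQ under each queue's _lock, all interrupts are masked (a 64-bit writeq on SIS-64 adapters, writel otherwise), pending interrupts are cleared, and the sense register is read back so the posted MMIO writes are flushed before the caller continues. A hedged reconstruction of that shape; the register names follow the listing, while the struct layout and type names are assumptions:

    static void mask_and_clear_interrupts(struct my_ioa_cfg *cfg, u32 clr_ints)
    {
    	int i;

    	/* Keep the interrupt handlers away from the reply queues. */
    	for (i = 0; i < cfg->hrrq_num; i++) {
    		spin_lock(&cfg->hrrq[i]._lock);
    		cfg->hrrq[i].allow_interrupts = 0;
    		spin_unlock(&cfg->hrrq[i]._lock);
    	}

    	/* Mask everything: 64-bit mask register on SIS-64, 32-bit otherwise. */
    	if (cfg->sis64)
    		writeq(~0, cfg->regs.set_interrupt_mask_reg);
    	else
    		writel(~0, cfg->regs.set_interrupt_mask_reg);

    	/* Clear whatever is already pending. */
    	if (cfg->sis64)
    		writel(~0, cfg->regs.clr_interrupt_reg);
    	writel(clr_ints, cfg->regs.clr_interrupt_reg32);

    	/* Read back to flush the posted writes before returning. */
    	readl(cfg->regs.sense_interrupt_reg);
    }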
ioa_cfg           770 drivers/scsi/ipr.c static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg           772 drivers/scsi/ipr.c 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
ioa_cfg           777 drivers/scsi/ipr.c 	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
ioa_cfg           778 drivers/scsi/ipr.c 				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
ioa_cfg           779 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
ioa_cfg           783 drivers/scsi/ipr.c 	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
ioa_cfg           794 drivers/scsi/ipr.c static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg           796 drivers/scsi/ipr.c 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
ioa_cfg           799 drivers/scsi/ipr.c 		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
ioa_cfg           800 drivers/scsi/ipr.c 					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
ioa_cfg           801 drivers/scsi/ipr.c 			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
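ipr.c:770-801 above save the PCI-X command register at setup time and restore it after an adapter reset, via pci_find_capability() and pci_read/write_config_word(). A condensed sketch of that idiom; in the driver the saved value lives in the adapter's private structure rather than a static, and the flag OR-in mirrors line 783:

    #include <linux/pci.h>

    static u16 saved_pcix_cmd;	/* the driver keeps this in its per-adapter struct */

    static int save_pcix_cmd(struct pci_dev *pdev)
    {
    	int cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);

    	if (!cap)
    		return 0;	/* not PCI-X: nothing to save or restore */

    	if (pci_read_config_word(pdev, cap + PCI_X_CMD,
    				 &saved_pcix_cmd) != PCIBIOS_SUCCESSFUL) {
    		dev_err(&pdev->dev, "Failed to save PCI-X command register\n");
    		return -EIO;
    	}

    	/* Keep data parity error recovery and relaxed ordering enabled. */
    	saved_pcix_cmd |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
    	return 0;
    }

    static int restore_pcix_cmd(struct pci_dev *pdev)
    {
    	int cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);

    	if (cap && pci_write_config_word(pdev, cap + PCI_X_CMD,
    					 saved_pcix_cmd) != PCIBIOS_SUCCESSFUL) {
    		dev_err(&pdev->dev, "Failed to setup PCI-X command register\n");
    		return -EIO;
    	}
    	return 0;
    }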
ioa_cfg           904 drivers/scsi/ipr.c static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg           910 drivers/scsi/ipr.c 	for_each_hrrq(hrrq, ioa_cfg) {
ioa_cfg           949 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg           952 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg           960 drivers/scsi/ipr.c 		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
ioa_cfg           962 drivers/scsi/ipr.c 		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
ioa_cfg          1035 drivers/scsi/ipr.c 	if (ipr_cmd->ioa_cfg->sis64) {
ioa_cfg          1072 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          1077 drivers/scsi/ipr.c 	spin_unlock_irq(ioa_cfg->host->host_lock);
ioa_cfg          1079 drivers/scsi/ipr.c 	spin_lock_irq(ioa_cfg->host->host_lock);
ioa_cfg          1082 drivers/scsi/ipr.c static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          1086 drivers/scsi/ipr.c 	if (ioa_cfg->hrrq_num == 1)
ioa_cfg          1089 drivers/scsi/ipr.c 		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
ioa_cfg          1090 drivers/scsi/ipr.c 		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
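ipr.c:1082-1090 is ipr_get_hrrq_index(): reply queue 0 is reserved for internal commands, and normal I/O round-robins across queues 1..hrrq_num-1 with an atomic counter, so no lock is needed on the hot path. A small sketch of that selection:

    /* Pick a reply queue for a new command.  Index 0 is reserved for
     * initialization/internal use; everything else round-robins over
     * the remaining queues via a shared atomic counter. */
    static unsigned int pick_hrrq_index(atomic_t *rr_counter, unsigned int hrrq_num)
    {
    	unsigned int hrrq;

    	if (hrrq_num == 1)
    		return 0;

    	hrrq = atomic_add_return(1, rr_counter);
    	return (hrrq % (hrrq_num - 1)) + 1;
    }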
ioa_cfg          1108 drivers/scsi/ipr.c static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
ioa_cfg          1114 drivers/scsi/ipr.c 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
ioa_cfg          1115 drivers/scsi/ipr.c 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg          1117 drivers/scsi/ipr.c 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
ioa_cfg          1141 drivers/scsi/ipr.c 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
ioa_cfg          1183 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
ioa_cfg          1195 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          1211 drivers/scsi/ipr.c 			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
ioa_cfg          1219 drivers/scsi/ipr.c 				res->target = find_first_zero_bit(ioa_cfg->target_ids,
ioa_cfg          1220 drivers/scsi/ipr.c 								  ioa_cfg->max_devs_supported);
ioa_cfg          1221 drivers/scsi/ipr.c 				set_bit(res->target, ioa_cfg->target_ids);
ioa_cfg          1228 drivers/scsi/ipr.c 			res->target = find_first_zero_bit(ioa_cfg->array_ids,
ioa_cfg          1229 drivers/scsi/ipr.c 							  ioa_cfg->max_devs_supported);
ioa_cfg          1230 drivers/scsi/ipr.c 			set_bit(res->target, ioa_cfg->array_ids);
ioa_cfg          1233 drivers/scsi/ipr.c 			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
ioa_cfg          1234 drivers/scsi/ipr.c 							  ioa_cfg->max_devs_supported);
ioa_cfg          1235 drivers/scsi/ipr.c 			set_bit(res->target, ioa_cfg->vset_ids);
ioa_cfg          1237 drivers/scsi/ipr.c 			res->target = find_first_zero_bit(ioa_cfg->target_ids,
ioa_cfg          1238 drivers/scsi/ipr.c 							  ioa_cfg->max_devs_supported);
ioa_cfg          1239 drivers/scsi/ipr.c 			set_bit(res->target, ioa_cfg->target_ids);
ioa_cfg          1270 drivers/scsi/ipr.c 	if (res->ioa_cfg->sis64) {
ioa_cfg          1319 drivers/scsi/ipr.c static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          1325 drivers/scsi/ipr.c 	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
ioa_cfg          1345 drivers/scsi/ipr.c 	if (res->ioa_cfg->sis64) {
ioa_cfg          1370 drivers/scsi/ipr.c 				    ipr_format_res_path(res->ioa_cfg,
ioa_cfg          1402 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
ioa_cfg          1404 drivers/scsi/ipr.c 	if (!ioa_cfg->sis64)
ioa_cfg          1408 drivers/scsi/ipr.c 		clear_bit(res->target, ioa_cfg->array_ids);
ioa_cfg          1410 drivers/scsi/ipr.c 		clear_bit(res->target, ioa_cfg->vset_ids);
ioa_cfg          1412 drivers/scsi/ipr.c 		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
ioa_cfg          1415 drivers/scsi/ipr.c 		clear_bit(res->target, ioa_cfg->target_ids);
ioa_cfg          1418 drivers/scsi/ipr.c 		clear_bit(res->target, ioa_cfg->target_ids);
ioa_cfg          1429 drivers/scsi/ipr.c static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          1438 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          1446 drivers/scsi/ipr.c 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
ioa_cfg          1454 drivers/scsi/ipr.c 		if (list_empty(&ioa_cfg->free_res_q)) {
ioa_cfg          1455 drivers/scsi/ipr.c 			ipr_send_hcam(ioa_cfg,
ioa_cfg          1461 drivers/scsi/ipr.c 		res = list_entry(ioa_cfg->free_res_q.next,
ioa_cfg          1466 drivers/scsi/ipr.c 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
ioa_cfg          1475 drivers/scsi/ipr.c 			schedule_work(&ioa_cfg->work_q);
ioa_cfg          1478 drivers/scsi/ipr.c 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
ioa_cfg          1482 drivers/scsi/ipr.c 		schedule_work(&ioa_cfg->work_q);
ioa_cfg          1485 drivers/scsi/ipr.c 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
ioa_cfg          1500 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          1510 drivers/scsi/ipr.c 			dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          1513 drivers/scsi/ipr.c 		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
ioa_cfg          1515 drivers/scsi/ipr.c 		ipr_handle_config_change(ioa_cfg, hostrcb);
ioa_cfg          1628 drivers/scsi/ipr.c static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          1633 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          1664 drivers/scsi/ipr.c static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          1696 drivers/scsi/ipr.c static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          1714 drivers/scsi/ipr.c 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
ioa_cfg          1736 drivers/scsi/ipr.c static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          1779 drivers/scsi/ipr.c static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          1797 drivers/scsi/ipr.c 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
ioa_cfg          1826 drivers/scsi/ipr.c static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          1840 drivers/scsi/ipr.c 		ioa_cfg->host->host_no,
ioa_cfg          1861 drivers/scsi/ipr.c 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
ioa_cfg          1862 drivers/scsi/ipr.c 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
ioa_cfg          1877 drivers/scsi/ipr.c static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          1891 drivers/scsi/ipr.c 		ioa_cfg->host->host_no,
ioa_cfg          1911 drivers/scsi/ipr.c 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
ioa_cfg          1912 drivers/scsi/ipr.c 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
ioa_cfg          1933 drivers/scsi/ipr.c static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
ioa_cfg          1940 drivers/scsi/ipr.c 	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
ioa_cfg          1960 drivers/scsi/ipr.c static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          1965 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          1976 drivers/scsi/ipr.c 	ipr_log_hex_data(ioa_cfg, error->data,
ioa_cfg          1990 drivers/scsi/ipr.c static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          2002 drivers/scsi/ipr.c 	ipr_log_hex_data(ioa_cfg, error->data,
ioa_cfg          2103 drivers/scsi/ipr.c 				     ipr_format_res_path(hostrcb->ioa_cfg,
ioa_cfg          2111 drivers/scsi/ipr.c 		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
ioa_cfg          2253 drivers/scsi/ipr.c 				     ipr_format_res_path(hostrcb->ioa_cfg,
ioa_cfg          2263 drivers/scsi/ipr.c 		     ipr_format_res_path(hostrcb->ioa_cfg,
ioa_cfg          2277 drivers/scsi/ipr.c static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          2303 drivers/scsi/ipr.c 	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
ioa_cfg          2314 drivers/scsi/ipr.c static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          2329 drivers/scsi/ipr.c 		ipr_format_res_path(ioa_cfg, error->last_res_path,
ioa_cfg          2351 drivers/scsi/ipr.c 			 ipr_format_res_path(ioa_cfg, array_entry->res_path,
ioa_cfg          2354 drivers/scsi/ipr.c 			 ipr_format_res_path(ioa_cfg,
ioa_cfg          2370 drivers/scsi/ipr.c static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          2397 drivers/scsi/ipr.c 	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
ioa_cfg          2408 drivers/scsi/ipr.c static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          2417 drivers/scsi/ipr.c 	ipr_log_hex_data(ioa_cfg, error->data,
ioa_cfg          2430 drivers/scsi/ipr.c static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          2433 drivers/scsi/ipr.c 	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
ioa_cfg          2445 drivers/scsi/ipr.c static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          2465 drivers/scsi/ipr.c 	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
ioa_cfg          2467 drivers/scsi/ipr.c 	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
ioa_cfg          2470 drivers/scsi/ipr.c 	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
ioa_cfg          2505 drivers/scsi/ipr.c static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          2516 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
ioa_cfg          2518 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          2523 drivers/scsi/ipr.c 	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
ioa_cfg          2526 drivers/scsi/ipr.c 		scsi_report_bus_reset(ioa_cfg->host,
ioa_cfg          2540 drivers/scsi/ipr.c 			ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
ioa_cfg          2547 drivers/scsi/ipr.c 	ioa_cfg->errors_logged++;
ioa_cfg          2549 drivers/scsi/ipr.c 	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
ioa_cfg          2556 drivers/scsi/ipr.c 		ipr_log_cache_error(ioa_cfg, hostrcb);
ioa_cfg          2559 drivers/scsi/ipr.c 		ipr_log_config_error(ioa_cfg, hostrcb);
ioa_cfg          2563 drivers/scsi/ipr.c 		ipr_log_array_error(ioa_cfg, hostrcb);
ioa_cfg          2566 drivers/scsi/ipr.c 		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
ioa_cfg          2569 drivers/scsi/ipr.c 		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
ioa_cfg          2572 drivers/scsi/ipr.c 		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
ioa_cfg          2576 drivers/scsi/ipr.c 		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
ioa_cfg          2579 drivers/scsi/ipr.c 		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
ioa_cfg          2582 drivers/scsi/ipr.c 		ipr_log_fabric_error(ioa_cfg, hostrcb);
ioa_cfg          2585 drivers/scsi/ipr.c 		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
ioa_cfg          2588 drivers/scsi/ipr.c 		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
ioa_cfg          2592 drivers/scsi/ipr.c 		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
ioa_cfg          2595 drivers/scsi/ipr.c 		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
ioa_cfg          2598 drivers/scsi/ipr.c 		ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
ioa_cfg          2603 drivers/scsi/ipr.c 		ipr_log_generic_error(ioa_cfg, hostrcb);
ioa_cfg          2638 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          2643 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          2652 drivers/scsi/ipr.c 		ipr_handle_log_data(ioa_cfg, hostrcb);
ioa_cfg          2654 drivers/scsi/ipr.c 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
ioa_cfg          2657 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          2661 drivers/scsi/ipr.c 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
ioa_cfg          2662 drivers/scsi/ipr.c 	schedule_work(&ioa_cfg->work_q);
ioa_cfg          2663 drivers/scsi/ipr.c 	hostrcb = ipr_get_free_hostrcb(ioa_cfg);
ioa_cfg          2665 drivers/scsi/ipr.c 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
ioa_cfg          2682 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          2685 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          2687 drivers/scsi/ipr.c 	ioa_cfg->errors_logged++;
ioa_cfg          2688 drivers/scsi/ipr.c 	dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          2691 drivers/scsi/ipr.c 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
ioa_cfg          2692 drivers/scsi/ipr.c 		ioa_cfg->sdt_state = GET_DUMP;
ioa_cfg          2694 drivers/scsi/ipr.c 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
ioa_cfg          2695 drivers/scsi/ipr.c 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
ioa_cfg          2697 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          2715 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          2718 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          2720 drivers/scsi/ipr.c 	ioa_cfg->errors_logged++;
ioa_cfg          2721 drivers/scsi/ipr.c 	dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          2724 drivers/scsi/ipr.c 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
ioa_cfg          2725 drivers/scsi/ipr.c 		ioa_cfg->sdt_state = GET_DUMP;
ioa_cfg          2727 drivers/scsi/ipr.c 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
ioa_cfg          2729 drivers/scsi/ipr.c 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
ioa_cfg          2730 drivers/scsi/ipr.c 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
ioa_cfg          2733 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
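ipr.c:2682-2733 above are the HCAM and adapter timeout handlers, and they share one shape: take the host lock with interrupts saved, bump errors_logged and report the failure, promote a pending WAIT_FOR_DUMP to GET_DUMP, and start an adapter reset unless one is already in flight for another command. A hedged outline of that error path; the types, message text, and reset helper are stand-ins, not the driver's real names:

    static void adapter_timeout(struct my_ioa_cfg *cfg, struct my_cmd *cmd)
    {
    	unsigned long flags;

    	spin_lock_irqsave(cfg->host->host_lock, flags);

    	cfg->errors_logged++;
    	dev_err(&cfg->pdev->dev, "adapter timed out, resetting\n");

    	/* If userspace asked for a dump, collect it as part of the reset. */
    	if (cfg->sdt_state == WAIT_FOR_DUMP)
    		cfg->sdt_state = GET_DUMP;

    	/* Avoid piling a new reset on top of one already in progress. */
    	if (!cfg->in_reset_reload || cfg->reset_cmd == cmd)
    		initiate_adapter_reset(cfg);	/* hypothetical helper */

    	spin_unlock_irqrestore(cfg->host->host_lock, flags);
    }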
ioa_cfg          2782 drivers/scsi/ipr.c static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
ioa_cfg          2789 drivers/scsi/ipr.c 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
ioa_cfg          2815 drivers/scsi/ipr.c static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
ioa_cfg          2822 drivers/scsi/ipr.c 		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
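ipr.c:2815-2822 is ipr_wait_iodbg_ack(), which polls the sense-interrupt register until the IOA acknowledges an I/O debug operation or a delay budget runs out. A self-contained sketch of that poll-with-timeout pattern; the acknowledge bit value here is illustrative, not the hardware's:

    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    #define IODBG_ACK_BIT	0x00000020	/* illustrative, not the real bit */

    /* Poll a status register until the ack bit appears or max_delay_us
     * microseconds have elapsed. */
    static int wait_iodbg_ack(void __iomem *sense_reg, unsigned int max_delay_us)
    {
    	unsigned int waited = 0;

    	while (waited < max_delay_us) {
    		if (readl(sense_reg) & IODBG_ACK_BIT)
    			return 0;

    		udelay(10);
    		waited += 10;
    	}
    	return -ETIMEDOUT;
    }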
ioa_cfg          2848 drivers/scsi/ipr.c static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          2855 drivers/scsi/ipr.c 		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
ioa_cfg          2856 drivers/scsi/ipr.c 		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
ioa_cfg          2873 drivers/scsi/ipr.c static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          2880 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          2881 drivers/scsi/ipr.c 		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
ioa_cfg          2886 drivers/scsi/ipr.c 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
ioa_cfg          2889 drivers/scsi/ipr.c 	if (ipr_wait_iodbg_ack(ioa_cfg,
ioa_cfg          2891 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          2898 drivers/scsi/ipr.c 	       ioa_cfg->regs.clr_interrupt_reg);
ioa_cfg          2901 drivers/scsi/ipr.c 	writel(start_addr, ioa_cfg->ioa_mailbox);
ioa_cfg          2905 drivers/scsi/ipr.c 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
ioa_cfg          2909 drivers/scsi/ipr.c 		if (ipr_wait_iodbg_ack(ioa_cfg,
ioa_cfg          2911 drivers/scsi/ipr.c 			dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          2917 drivers/scsi/ipr.c 		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
ioa_cfg          2924 drivers/scsi/ipr.c 			       ioa_cfg->regs.clr_interrupt_reg);
ioa_cfg          2930 drivers/scsi/ipr.c 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
ioa_cfg          2933 drivers/scsi/ipr.c 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
ioa_cfg          2937 drivers/scsi/ipr.c 	       ioa_cfg->regs.clr_interrupt_reg);
ioa_cfg          2942 drivers/scsi/ipr.c 		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
ioa_cfg          2966 drivers/scsi/ipr.c static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          2973 drivers/scsi/ipr.c 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
ioa_cfg          2975 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          3001 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3002 drivers/scsi/ipr.c 		if (ioa_cfg->sdt_state == ABORT_DUMP) {
ioa_cfg          3005 drivers/scsi/ipr.c 			rc = ipr_get_ldump_data_section(ioa_cfg,
ioa_cfg          3010 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3048 drivers/scsi/ipr.c static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          3051 drivers/scsi/ipr.c 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ioa_cfg          3059 drivers/scsi/ipr.c 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
ioa_cfg          3074 drivers/scsi/ipr.c static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          3095 drivers/scsi/ipr.c static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          3104 drivers/scsi/ipr.c 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
ioa_cfg          3116 drivers/scsi/ipr.c static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          3125 drivers/scsi/ipr.c 	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
ioa_cfg          3137 drivers/scsi/ipr.c static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
ioa_cfg          3151 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3153 drivers/scsi/ipr.c 	if (ioa_cfg->sdt_state != READ_DUMP) {
ioa_cfg          3154 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3158 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          3159 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3161 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3164 drivers/scsi/ipr.c 	start_addr = readl(ioa_cfg->ioa_mailbox);
ioa_cfg          3166 drivers/scsi/ipr.c 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
ioa_cfg          3167 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          3169 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3173 drivers/scsi/ipr.c 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
ioa_cfg          3185 drivers/scsi/ipr.c 	ipr_dump_version_data(ioa_cfg, driver_dump);
ioa_cfg          3186 drivers/scsi/ipr.c 	ipr_dump_location_data(ioa_cfg, driver_dump);
ioa_cfg          3187 drivers/scsi/ipr.c 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
ioa_cfg          3188 drivers/scsi/ipr.c 	ipr_dump_trace_data(ioa_cfg, driver_dump);
ioa_cfg          3205 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          3215 drivers/scsi/ipr.c 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
ioa_cfg          3221 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          3225 drivers/scsi/ipr.c 		ioa_cfg->sdt_state = DUMP_OBTAINED;
ioa_cfg          3226 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3237 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          3242 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3252 drivers/scsi/ipr.c 			if (ioa_cfg->sis64)
ioa_cfg          3270 drivers/scsi/ipr.c 				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
ioa_cfg          3283 drivers/scsi/ipr.c 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
ioa_cfg          3288 drivers/scsi/ipr.c 	ioa_cfg->sdt_state = DUMP_OBTAINED;
ioa_cfg          3293 drivers/scsi/ipr.c #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
ioa_cfg          3306 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
ioa_cfg          3311 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3312 drivers/scsi/ipr.c 	ioa_cfg->dump = NULL;
ioa_cfg          3313 drivers/scsi/ipr.c 	ioa_cfg->sdt_state = INACTIVE;
ioa_cfg          3314 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3329 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg =
ioa_cfg          3335 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3340 drivers/scsi/ipr.c 		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
ioa_cfg          3341 drivers/scsi/ipr.c 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3345 drivers/scsi/ipr.c 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
ioa_cfg          3351 drivers/scsi/ipr.c 						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
ioa_cfg          3354 drivers/scsi/ipr.c 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3357 drivers/scsi/ipr.c 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3364 drivers/scsi/ipr.c 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
ioa_cfg          3370 drivers/scsi/ipr.c 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3371 drivers/scsi/ipr.c 			scsi_add_device(ioa_cfg->host, bus, target, lun);
ioa_cfg          3372 drivers/scsi/ipr.c 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3377 drivers/scsi/ipr.c 	ioa_cfg->scan_done = 1;
ioa_cfg          3378 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3379 drivers/scsi/ipr.c 	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
ioa_cfg          3398 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg =
ioa_cfg          3402 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3404 drivers/scsi/ipr.c 	if (ioa_cfg->sdt_state == READ_DUMP) {
ioa_cfg          3405 drivers/scsi/ipr.c 		dump = ioa_cfg->dump;
ioa_cfg          3407 drivers/scsi/ipr.c 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3411 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3412 drivers/scsi/ipr.c 		ipr_get_ioa_dump(ioa_cfg, dump);
ioa_cfg          3415 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3416 drivers/scsi/ipr.c 		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
ioa_cfg          3417 drivers/scsi/ipr.c 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
ioa_cfg          3418 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3422 drivers/scsi/ipr.c 	if (ioa_cfg->scsi_unblock) {
ioa_cfg          3423 drivers/scsi/ipr.c 		ioa_cfg->scsi_unblock = 0;
ioa_cfg          3424 drivers/scsi/ipr.c 		ioa_cfg->scsi_blocked = 0;
ioa_cfg          3425 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3426 drivers/scsi/ipr.c 		scsi_unblock_requests(ioa_cfg->host);
ioa_cfg          3427 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3428 drivers/scsi/ipr.c 		if (ioa_cfg->scsi_blocked)
ioa_cfg          3429 drivers/scsi/ipr.c 			scsi_block_requests(ioa_cfg->host);
ioa_cfg          3432 drivers/scsi/ipr.c 	if (!ioa_cfg->scan_enabled) {
ioa_cfg          3433 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3437 drivers/scsi/ipr.c 	schedule_work(&ioa_cfg->scsi_add_work_q);
ioa_cfg          3439 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3462 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          3466 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3467 drivers/scsi/ipr.c 	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
ioa_cfg          3469 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3496 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          3497 drivers/scsi/ipr.c 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ioa_cfg          3501 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3506 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3530 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          3534 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3535 drivers/scsi/ipr.c 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
ioa_cfg          3536 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3553 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          3556 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3557 drivers/scsi/ipr.c 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
ioa_cfg          3558 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
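ipr.c:3530-3558 above are the log_level sysfs show/store pair; like most of the attributes in this section they simply take the host lock around a snprintf or a parse of the user buffer. A sketch of the show side, assuming a hypothetical my_ioa_cfg private structure hung off shost->hostdata:

    #include <scsi/scsi_host.h>

    static ssize_t log_level_show(struct device *dev,
    			      struct device_attribute *attr, char *buf)
    {
    	struct Scsi_Host *shost = class_to_shost(dev);
    	struct my_ioa_cfg *cfg = (struct my_ioa_cfg *)shost->hostdata;
    	unsigned long flags;
    	ssize_t len;

    	/* host_lock serializes against reset paths that also touch cfg. */
    	spin_lock_irqsave(shost->host_lock, flags);
    	len = snprintf(buf, PAGE_SIZE, "%d\n", cfg->log_level);
    	spin_unlock_irqrestore(shost->host_lock, flags);
    	return len;
    }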
ioa_cfg          3588 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          3595 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3596 drivers/scsi/ipr.c 	while (ioa_cfg->in_reset_reload) {
ioa_cfg          3597 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3598 drivers/scsi/ipr.c 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          3599 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3602 drivers/scsi/ipr.c 	ioa_cfg->errors_logged = 0;
ioa_cfg          3603 drivers/scsi/ipr.c 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
ioa_cfg          3605 drivers/scsi/ipr.c 	if (ioa_cfg->in_reset_reload) {
ioa_cfg          3606 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3607 drivers/scsi/ipr.c 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          3612 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3616 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3617 drivers/scsi/ipr.c 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
ioa_cfg          3619 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
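ipr.c:3588-3619 (the diagnostics store path) opens with a pattern that recurs throughout the file (see also 4019-4028 and 5274-5284): if a reset/reload is already running, drop the host lock, sleep on reset_wait_q until in_reset_reload clears, and retake the lock before proceeding. The lock has to be dropped because wait_event() can sleep. A minimal sketch of that wait loop, with field and function names assumed to mirror the listing:

    static void diagnostics_store(struct my_ioa_cfg *cfg)
    {
    	unsigned long flags;

    	spin_lock_irqsave(cfg->host->host_lock, flags);

    	/* Wait out any reset/reload already in flight; the lock must be
    	 * released around wait_event() because it may sleep. */
    	while (cfg->in_reset_reload) {
    		spin_unlock_irqrestore(cfg->host->host_lock, flags);
    		wait_event(cfg->reset_wait_q, !cfg->in_reset_reload);
    		spin_lock_irqsave(cfg->host->host_lock, flags);
    	}

    	/* ... kick off a new reset and wait for it, as at 3602-3607 ... */

    	spin_unlock_irqrestore(cfg->host->host_lock, flags);
    }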
ioa_cfg          3644 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          3648 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3649 drivers/scsi/ipr.c 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
ioa_cfg          3653 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3673 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          3680 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3681 drivers/scsi/ipr.c 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
ioa_cfg          3683 drivers/scsi/ipr.c 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          3684 drivers/scsi/ipr.c 			spin_lock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          3685 drivers/scsi/ipr.c 			ioa_cfg->hrrq[i].ioa_is_dead = 0;
ioa_cfg          3686 drivers/scsi/ipr.c 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          3689 drivers/scsi/ipr.c 		ioa_cfg->reset_retries = 0;
ioa_cfg          3690 drivers/scsi/ipr.c 		ioa_cfg->in_ioa_bringdown = 0;
ioa_cfg          3691 drivers/scsi/ipr.c 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
ioa_cfg          3693 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3694 drivers/scsi/ipr.c 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          3724 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          3731 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3732 drivers/scsi/ipr.c 	if (!ioa_cfg->in_reset_reload)
ioa_cfg          3733 drivers/scsi/ipr.c 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
ioa_cfg          3734 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          3735 drivers/scsi/ipr.c 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          3761 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          3766 drivers/scsi/ipr.c 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
ioa_cfg          3785 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          3790 drivers/scsi/ipr.c 	if (!ioa_cfg->sis64) {
ioa_cfg          3791 drivers/scsi/ipr.c 		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
ioa_cfg          3798 drivers/scsi/ipr.c 		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
ioa_cfg          3802 drivers/scsi/ipr.c 	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
ioa_cfg          3803 drivers/scsi/ipr.c 		dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
ioa_cfg          3807 drivers/scsi/ipr.c 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
ioa_cfg          3808 drivers/scsi/ipr.c 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
ioa_cfg          3809 drivers/scsi/ipr.c 			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
ioa_cfg          3813 drivers/scsi/ipr.c 	ioa_cfg->iopoll_weight = user_iopoll_weight;
ioa_cfg          3814 drivers/scsi/ipr.c 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
ioa_cfg          3815 drivers/scsi/ipr.c 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          3816 drivers/scsi/ipr.c 			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
ioa_cfg          3817 drivers/scsi/ipr.c 					ioa_cfg->iopoll_weight, ipr_iopoll);
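ipr.c:3761-3817 shows the irq_poll weight attribute: the new weight is only accepted on SIS-64 adapters and must be below 256, and changing it means disabling the poller on every non-init HRRQ and re-initializing it with the new budget. A sketch of the re-init step, with the hrrq/iopoll layout assumed and the poll callback supplied by the caller:

    #include <linux/irq_poll.h>

    static void set_iopoll_weight(struct my_ioa_cfg *cfg, int new_weight,
    			      irq_poll_fn *poll_fn)
    {
    	int i;

    	/* Tear down the existing pollers (queue 0 is never polled). */
    	if (cfg->iopoll_weight && cfg->nvectors > 1)
    		for (i = 1; i < cfg->hrrq_num; i++)
    			irq_poll_disable(&cfg->hrrq[i].iopoll);

    	cfg->iopoll_weight = new_weight;

    	/* Re-arm them with the new per-iteration budget. */
    	if (cfg->iopoll_weight && cfg->nvectors > 1)
    		for (i = 1; i < cfg->hrrq_num; i++)
    			irq_poll_init(&cfg->hrrq[i].iopoll,
    				      cfg->iopoll_weight, poll_fn);
    }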
ioa_cfg          4019 drivers/scsi/ipr.c static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          4024 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4025 drivers/scsi/ipr.c 	while (ioa_cfg->in_reset_reload) {
ioa_cfg          4026 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4027 drivers/scsi/ipr.c 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          4028 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4031 drivers/scsi/ipr.c 	if (ioa_cfg->ucode_sglist) {
ioa_cfg          4032 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4033 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          4038 drivers/scsi/ipr.c 	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
ioa_cfg          4043 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4044 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          4049 drivers/scsi/ipr.c 	ioa_cfg->ucode_sglist = sglist;
ioa_cfg          4050 drivers/scsi/ipr.c 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
ioa_cfg          4051 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4052 drivers/scsi/ipr.c 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          4054 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4055 drivers/scsi/ipr.c 	ioa_cfg->ucode_sglist = NULL;
ioa_cfg          4056 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4076 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          4094 drivers/scsi/ipr.c 	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
ioa_cfg          4095 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
ioa_cfg          4106 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
ioa_cfg          4114 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          4121 drivers/scsi/ipr.c 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
ioa_cfg          4151 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          4155 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4156 drivers/scsi/ipr.c 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
ioa_cfg          4157 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4175 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          4180 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4181 drivers/scsi/ipr.c 	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
ioa_cfg          4184 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4189 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4199 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          4203 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4204 drivers/scsi/ipr.c 	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
ioa_cfg          4207 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4212 drivers/scsi/ipr.c 	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
ioa_cfg          4213 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4258 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          4268 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4269 drivers/scsi/ipr.c 	dump = ioa_cfg->dump;
ioa_cfg          4271 drivers/scsi/ipr.c 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
ioa_cfg          4272 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4276 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4302 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          4348 drivers/scsi/ipr.c static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          4361 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          4377 drivers/scsi/ipr.c 	dump->ioa_cfg = ioa_cfg;
ioa_cfg          4379 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4381 drivers/scsi/ipr.c 	if (INACTIVE != ioa_cfg->sdt_state) {
ioa_cfg          4382 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4388 drivers/scsi/ipr.c 	ioa_cfg->dump = dump;
ioa_cfg          4389 drivers/scsi/ipr.c 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
ioa_cfg          4390 drivers/scsi/ipr.c 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
ioa_cfg          4391 drivers/scsi/ipr.c 		ioa_cfg->dump_taken = 1;
ioa_cfg          4392 drivers/scsi/ipr.c 		schedule_work(&ioa_cfg->work_q);
ioa_cfg          4394 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4406 drivers/scsi/ipr.c static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          4413 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4414 drivers/scsi/ipr.c 	dump = ioa_cfg->dump;
ioa_cfg          4416 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4420 drivers/scsi/ipr.c 	ioa_cfg->dump = NULL;
ioa_cfg          4421 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4447 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          4454 drivers/scsi/ipr.c 		rc = ipr_alloc_dump(ioa_cfg);
ioa_cfg          4456 drivers/scsi/ipr.c 		rc = ipr_free_dump(ioa_cfg);
ioa_cfg          4476 drivers/scsi/ipr.c static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
ioa_cfg          4490 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
ioa_cfg          4494 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4499 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4517 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
ioa_cfg          4522 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4526 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4551 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
ioa_cfg          4557 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4559 drivers/scsi/ipr.c 	if (res && ioa_cfg->sis64)
ioa_cfg          4564 drivers/scsi/ipr.c 		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
ioa_cfg          4567 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4591 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
ioa_cfg          4596 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4598 drivers/scsi/ipr.c 	if (res && ioa_cfg->sis64)
ioa_cfg          4603 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4627 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
ioa_cfg          4632 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4638 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4662 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
ioa_cfg          4667 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4673 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4690 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
ioa_cfg          4695 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4708 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4775 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
ioa_cfg          4778 drivers/scsi/ipr.c 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
ioa_cfg          4803 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
ioa_cfg          4809 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4814 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4819 drivers/scsi/ipr.c 		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
ioa_cfg          4821 drivers/scsi/ipr.c 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4822 drivers/scsi/ipr.c 			sata_port->ioa_cfg = ioa_cfg;
ioa_cfg          4834 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4851 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
ioa_cfg          4853 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          4856 drivers/scsi/ipr.c 				clear_bit(starget->id, ioa_cfg->array_ids);
ioa_cfg          4858 drivers/scsi/ipr.c 				clear_bit(starget->id, ioa_cfg->vset_ids);
ioa_cfg          4860 drivers/scsi/ipr.c 				clear_bit(starget->id, ioa_cfg->target_ids);
ioa_cfg          4880 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
ioa_cfg          4883 drivers/scsi/ipr.c 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
ioa_cfg          4903 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          4906 drivers/scsi/ipr.c 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
ioa_cfg          4908 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4917 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4931 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
ioa_cfg          4937 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4955 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          4962 drivers/scsi/ipr.c 		if (ioa_cfg->sis64)
ioa_cfg          4964 drivers/scsi/ipr.c 				    ipr_format_res_path(ioa_cfg,
ioa_cfg          4968 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5017 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
ioa_cfg          5024 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5036 drivers/scsi/ipr.c 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5041 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5106 drivers/scsi/ipr.c static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
ioa_cfg          5120 drivers/scsi/ipr.c 		for_each_hrrq(hrrq, ioa_cfg) {
ioa_cfg          5123 drivers/scsi/ipr.c 				ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
ioa_cfg          5140 drivers/scsi/ipr.c 				for_each_hrrq(hrrq, ioa_cfg) {
ioa_cfg          5143 drivers/scsi/ipr.c 						ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
ioa_cfg          5155 drivers/scsi/ipr.c 					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
ioa_cfg          5168 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          5173 drivers/scsi/ipr.c 	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
ioa_cfg          5174 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5176 drivers/scsi/ipr.c 	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ioa_cfg          5177 drivers/scsi/ipr.c 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
ioa_cfg          5178 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          5181 drivers/scsi/ipr.c 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
ioa_cfg          5182 drivers/scsi/ipr.c 			ioa_cfg->sdt_state = GET_DUMP;
ioa_cfg          5185 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5186 drivers/scsi/ipr.c 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          5187 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5191 drivers/scsi/ipr.c 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ioa_cfg          5196 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5215 drivers/scsi/ipr.c static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          5225 drivers/scsi/ipr.c 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg          5229 drivers/scsi/ipr.c 	if (ipr_cmd->ioa_cfg->sis64) {
ioa_cfg          5248 drivers/scsi/ipr.c 		if (ipr_cmd->ioa_cfg->sis64)
ioa_cfg          5274 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
ioa_cfg          5280 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5281 drivers/scsi/ipr.c 	while (ioa_cfg->in_reset_reload) {
ioa_cfg          5282 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5283 drivers/scsi/ipr.c 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          5284 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5289 drivers/scsi/ipr.c 		rc = ipr_device_reset(ioa_cfg, res);
ioa_cfg          5291 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5293 drivers/scsi/ipr.c 		ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
ioa_cfg          5295 drivers/scsi/ipr.c 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5296 drivers/scsi/ipr.c 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
ioa_cfg          5297 drivers/scsi/ipr.c 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5299 drivers/scsi/ipr.c 			wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          5302 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5322 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          5329 drivers/scsi/ipr.c 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
ioa_cfg          5337 drivers/scsi/ipr.c 	if (ioa_cfg->in_reset_reload)
ioa_cfg          5339 drivers/scsi/ipr.c 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
ioa_cfg          5342 drivers/scsi/ipr.c 	for_each_hrrq(hrrq, ioa_cfg) {
ioa_cfg          5345 drivers/scsi/ipr.c 			ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
ioa_cfg          5371 drivers/scsi/ipr.c 		rc = ipr_device_reset(ioa_cfg, res);
ioa_cfg          5382 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          5385 drivers/scsi/ipr.c 	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
ioa_cfg          5397 drivers/scsi/ipr.c 			rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
ioa_cfg          5399 drivers/scsi/ipr.c 			rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
ioa_cfg          5416 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          5420 drivers/scsi/ipr.c 	if (!ioa_cfg->sis64)
ioa_cfg          5421 drivers/scsi/ipr.c 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
ioa_cfg          5423 drivers/scsi/ipr.c 				scsi_report_bus_reset(ioa_cfg->host, res->bus);
ioa_cfg          5456 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          5461 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5462 drivers/scsi/ipr.c 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
ioa_cfg          5463 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5468 drivers/scsi/ipr.c 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg          5478 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          5494 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          5502 drivers/scsi/ipr.c 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
ioa_cfg          5509 drivers/scsi/ipr.c 	if (ioa_cfg->in_reset_reload ||
ioa_cfg          5510 drivers/scsi/ipr.c 	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
ioa_cfg          5520 drivers/scsi/ipr.c 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
ioa_cfg          5525 drivers/scsi/ipr.c 	for_each_hrrq(hrrq, ioa_cfg) {
ioa_cfg          5528 drivers/scsi/ipr.c 			if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
ioa_cfg          5529 drivers/scsi/ipr.c 				if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
ioa_cfg          5541 drivers/scsi/ipr.c 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg          5580 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
ioa_cfg          5584 drivers/scsi/ipr.c 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
ioa_cfg          5586 drivers/scsi/ipr.c 	if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
ioa_cfg          5603 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          5607 drivers/scsi/ipr.c 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
ioa_cfg          5614 drivers/scsi/ipr.c 		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
ioa_cfg          5627 drivers/scsi/ipr.c static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          5633 drivers/scsi/ipr.c 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
ioa_cfg          5640 drivers/scsi/ipr.c 		if (ioa_cfg->sis64) {
ioa_cfg          5641 drivers/scsi/ipr.c 			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
ioa_cfg          5642 drivers/scsi/ipr.c 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
ioa_cfg          5646 drivers/scsi/ipr.c 				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
ioa_cfg          5647 drivers/scsi/ipr.c 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
ioa_cfg          5648 drivers/scsi/ipr.c 				list_del(&ioa_cfg->reset_cmd->queue);
ioa_cfg          5649 drivers/scsi/ipr.c 				del_timer(&ioa_cfg->reset_cmd->timer);
ioa_cfg          5650 drivers/scsi/ipr.c 				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
ioa_cfg          5660 drivers/scsi/ipr.c 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
ioa_cfg          5661 drivers/scsi/ipr.c 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
ioa_cfg          5663 drivers/scsi/ipr.c 		list_del(&ioa_cfg->reset_cmd->queue);
ioa_cfg          5664 drivers/scsi/ipr.c 		del_timer(&ioa_cfg->reset_cmd->timer);
ioa_cfg          5665 drivers/scsi/ipr.c 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
ioa_cfg          5667 drivers/scsi/ipr.c 		if (ioa_cfg->clear_isr) {
ioa_cfg          5669 drivers/scsi/ipr.c 				dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          5671 drivers/scsi/ipr.c 			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
ioa_cfg          5672 drivers/scsi/ipr.c 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
ioa_cfg          5677 drivers/scsi/ipr.c 			ioa_cfg->ioa_unit_checked = 1;
ioa_cfg          5679 drivers/scsi/ipr.c 			dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          5682 drivers/scsi/ipr.c 			dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          5685 drivers/scsi/ipr.c 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
ioa_cfg          5686 drivers/scsi/ipr.c 			ioa_cfg->sdt_state = GET_DUMP;
ioa_cfg          5688 drivers/scsi/ipr.c 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
ioa_cfg          5689 drivers/scsi/ipr.c 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
ioa_cfg          5703 drivers/scsi/ipr.c static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
ioa_cfg          5705 drivers/scsi/ipr.c 	ioa_cfg->errors_logged++;
ioa_cfg          5706 drivers/scsi/ipr.c 	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
ioa_cfg          5708 drivers/scsi/ipr.c 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
ioa_cfg          5709 drivers/scsi/ipr.c 		ioa_cfg->sdt_state = GET_DUMP;
ioa_cfg          5711 drivers/scsi/ipr.c 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
ioa_cfg          5720 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
ioa_cfg          5736 drivers/scsi/ipr.c 			ipr_isr_eh(ioa_cfg,
ioa_cfg          5742 drivers/scsi/ipr.c 		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
ioa_cfg          5765 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          5773 drivers/scsi/ipr.c 	ioa_cfg = hrrq->ioa_cfg;
ioa_cfg          5802 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
ioa_cfg          5822 drivers/scsi/ipr.c 			if (!ioa_cfg->clear_isr)
ioa_cfg          5829 drivers/scsi/ipr.c 				     ioa_cfg->regs.clr_interrupt_reg32);
ioa_cfg          5830 drivers/scsi/ipr.c 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
ioa_cfg          5835 drivers/scsi/ipr.c 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
ioa_cfg          5839 drivers/scsi/ipr.c 			ipr_isr_eh(ioa_cfg,
ioa_cfg          5848 drivers/scsi/ipr.c 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
ioa_cfg          5870 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
ioa_cfg          5884 drivers/scsi/ipr.c 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
ioa_cfg          5917 drivers/scsi/ipr.c static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          5935 drivers/scsi/ipr.c 			dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
ioa_cfg          5969 drivers/scsi/ipr.c static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          5986 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
ioa_cfg          6100 drivers/scsi/ipr.c 	if (ipr_cmd->ioa_cfg->sis64)
ioa_cfg          6214 drivers/scsi/ipr.c static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          6230 drivers/scsi/ipr.c 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
ioa_cfg          6238 drivers/scsi/ipr.c 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
ioa_cfg          6250 drivers/scsi/ipr.c 	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
ioa_cfg          6253 drivers/scsi/ipr.c 	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
ioa_cfg          6255 drivers/scsi/ipr.c 	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
ioa_cfg          6378 drivers/scsi/ipr.c 	if (ipr_cmd->ioa_cfg->sis64)
ioa_cfg          6400 drivers/scsi/ipr.c static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          6416 drivers/scsi/ipr.c 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
ioa_cfg          6455 drivers/scsi/ipr.c 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
ioa_cfg          6509 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          6526 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          6528 drivers/scsi/ipr.c 		ipr_erp_start(ioa_cfg, ipr_cmd);
ioa_cfg          6530 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          6549 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          6558 drivers/scsi/ipr.c 	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
ioa_cfg          6564 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          6566 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          6570 drivers/scsi/ipr.c 	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
ioa_cfg          6571 drivers/scsi/ipr.c 	hrrq = &ioa_cfg->hrrq[hrrq_id];
ioa_cfg          6638 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          6639 drivers/scsi/ipr.c 		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
ioa_cfg          6641 drivers/scsi/ipr.c 		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
ioa_cfg          6713 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          6716 drivers/scsi/ipr.c 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
ioa_cfg          6719 drivers/scsi/ipr.c 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
ioa_cfg          6762 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
ioa_cfg          6766 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
ioa_cfg          6767 drivers/scsi/ipr.c 	while (ioa_cfg->in_reset_reload) {
ioa_cfg          6768 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
ioa_cfg          6769 drivers/scsi/ipr.c 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          6770 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
ioa_cfg          6773 drivers/scsi/ipr.c 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
ioa_cfg          6776 drivers/scsi/ipr.c 	rc = ipr_device_reset(ioa_cfg, res);
ioa_cfg          6788 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
ioa_cfg          6802 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
ioa_cfg          6807 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
ioa_cfg          6808 drivers/scsi/ipr.c 	while (ioa_cfg->in_reset_reload) {
ioa_cfg          6809 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
ioa_cfg          6810 drivers/scsi/ipr.c 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          6811 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
ioa_cfg          6814 drivers/scsi/ipr.c 	for_each_hrrq(hrrq, ioa_cfg) {
ioa_cfg          6818 drivers/scsi/ipr.c 				ipr_device_reset(ioa_cfg, sata_port->res);
ioa_cfg          6824 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
ioa_cfg          6865 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          6872 drivers/scsi/ipr.c 	if (ipr_cmd->ioa_cfg->sis64)
ioa_cfg          6878 drivers/scsi/ipr.c 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
ioa_cfg          6881 drivers/scsi/ipr.c 		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
ioa_cfg          6994 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
ioa_cfg          6999 drivers/scsi/ipr.c 	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
ioa_cfg          7000 drivers/scsi/ipr.c 	hrrq = &ioa_cfg->hrrq[hrrq_id];
ioa_cfg          7037 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
ioa_cfg          7061 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          7079 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          7192 drivers/scsi/ipr.c static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          7196 drivers/scsi/ipr.c 	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
ioa_cfg          7205 drivers/scsi/ipr.c #define ipr_invalid_adapter(ioa_cfg) 0
ioa_cfg          7220 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          7224 drivers/scsi/ipr.c 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
ioa_cfg          7226 drivers/scsi/ipr.c 		ioa_cfg->scsi_unblock = 1;
ioa_cfg          7227 drivers/scsi/ipr.c 		schedule_work(&ioa_cfg->work_q);
ioa_cfg          7230 drivers/scsi/ipr.c 	ioa_cfg->in_reset_reload = 0;
ioa_cfg          7231 drivers/scsi/ipr.c 	ioa_cfg->reset_retries = 0;
ioa_cfg          7232 drivers/scsi/ipr.c 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          7233 drivers/scsi/ipr.c 		spin_lock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          7234 drivers/scsi/ipr.c 		ioa_cfg->hrrq[i].ioa_is_dead = 1;
ioa_cfg          7235 drivers/scsi/ipr.c 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          7240 drivers/scsi/ipr.c 	wake_up_all(&ioa_cfg->reset_wait_q);
ioa_cfg          7259 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          7264 drivers/scsi/ipr.c 	ioa_cfg->in_reset_reload = 0;
ioa_cfg          7265 drivers/scsi/ipr.c 	for (j = 0; j < ioa_cfg->hrrq_num; j++) {
ioa_cfg          7266 drivers/scsi/ipr.c 		spin_lock(&ioa_cfg->hrrq[j]._lock);
ioa_cfg          7267 drivers/scsi/ipr.c 		ioa_cfg->hrrq[j].allow_cmds = 1;
ioa_cfg          7268 drivers/scsi/ipr.c 		spin_unlock(&ioa_cfg->hrrq[j]._lock);
ioa_cfg          7271 drivers/scsi/ipr.c 	ioa_cfg->reset_cmd = NULL;
ioa_cfg          7272 drivers/scsi/ipr.c 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
ioa_cfg          7274 drivers/scsi/ipr.c 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
ioa_cfg          7280 drivers/scsi/ipr.c 	schedule_work(&ioa_cfg->work_q);
ioa_cfg          7283 drivers/scsi/ipr.c 		list_del_init(&ioa_cfg->hostrcb[j]->queue);
ioa_cfg          7285 drivers/scsi/ipr.c 			ipr_send_hcam(ioa_cfg,
ioa_cfg          7287 drivers/scsi/ipr.c 				ioa_cfg->hostrcb[j]);
ioa_cfg          7289 drivers/scsi/ipr.c 			ipr_send_hcam(ioa_cfg,
ioa_cfg          7291 drivers/scsi/ipr.c 				ioa_cfg->hostrcb[j]);
ioa_cfg          7294 drivers/scsi/ipr.c 	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
ioa_cfg          7295 drivers/scsi/ipr.c 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
ioa_cfg          7297 drivers/scsi/ipr.c 	ioa_cfg->reset_retries = 0;
ioa_cfg          7299 drivers/scsi/ipr.c 	wake_up_all(&ioa_cfg->reset_wait_q);
ioa_cfg          7301 drivers/scsi/ipr.c 	ioa_cfg->scsi_unblock = 1;
ioa_cfg          7302 drivers/scsi/ipr.c 	schedule_work(&ioa_cfg->work_q);
ioa_cfg          7337 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          7338 drivers/scsi/ipr.c 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
ioa_cfg          7344 drivers/scsi/ipr.c 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
ioa_cfg          7361 drivers/scsi/ipr.c 			       ioa_cfg->vpd_cbs_dma +
ioa_cfg          7369 drivers/scsi/ipr.c 		if (!ioa_cfg->sis64)
ioa_cfg          7428 drivers/scsi/ipr.c static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          7445 drivers/scsi/ipr.c 			dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          7465 drivers/scsi/ipr.c static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          7471 drivers/scsi/ipr.c 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
ioa_cfg          7472 drivers/scsi/ipr.c 						       ioa_cfg->bus_attr[i].bus_width);
ioa_cfg          7474 drivers/scsi/ipr.c 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
ioa_cfg          7475 drivers/scsi/ipr.c 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
ioa_cfg          7489 drivers/scsi/ipr.c static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          7507 drivers/scsi/ipr.c 			dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          7513 drivers/scsi/ipr.c 		bus_attr = &ioa_cfg->bus_attr[i];
ioa_cfg          7564 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          7565 drivers/scsi/ipr.c 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
ioa_cfg          7569 drivers/scsi/ipr.c 	ipr_scsi_bus_speed_limit(ioa_cfg);
ioa_cfg          7570 drivers/scsi/ipr.c 	ipr_check_term_power(ioa_cfg, mode_pages);
ioa_cfg          7571 drivers/scsi/ipr.c 	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
ioa_cfg          7576 drivers/scsi/ipr.c 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
ioa_cfg          7580 drivers/scsi/ipr.c 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
ioa_cfg          7625 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          7628 drivers/scsi/ipr.c 	dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          7632 drivers/scsi/ipr.c 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
ioa_cfg          7649 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          7654 drivers/scsi/ipr.c 		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
ioa_cfg          7674 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          7678 drivers/scsi/ipr.c 			     0x28, ioa_cfg->vpd_cbs_dma +
ioa_cfg          7702 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          7703 drivers/scsi/ipr.c 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
ioa_cfg          7718 drivers/scsi/ipr.c 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
ioa_cfg          7762 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          7766 drivers/scsi/ipr.c 			     0x24, ioa_cfg->vpd_cbs_dma +
ioa_cfg          7793 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          7800 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          7801 drivers/scsi/ipr.c 		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
ioa_cfg          7803 drivers/scsi/ipr.c 		flag = ioa_cfg->u.cfg_table->hdr.flags;
ioa_cfg          7806 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
ioa_cfg          7808 drivers/scsi/ipr.c 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
ioa_cfg          7811 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          7812 drivers/scsi/ipr.c 		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
ioa_cfg          7814 drivers/scsi/ipr.c 		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
ioa_cfg          7817 drivers/scsi/ipr.c 		if (ioa_cfg->sis64)
ioa_cfg          7818 drivers/scsi/ipr.c 			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
ioa_cfg          7820 drivers/scsi/ipr.c 			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
ioa_cfg          7825 drivers/scsi/ipr.c 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
ioa_cfg          7832 drivers/scsi/ipr.c 			if (list_empty(&ioa_cfg->free_res_q)) {
ioa_cfg          7833 drivers/scsi/ipr.c 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
ioa_cfg          7838 drivers/scsi/ipr.c 			res = list_entry(ioa_cfg->free_res_q.next,
ioa_cfg          7840 drivers/scsi/ipr.c 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
ioa_cfg          7854 drivers/scsi/ipr.c 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
ioa_cfg          7860 drivers/scsi/ipr.c 		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
ioa_cfg          7863 drivers/scsi/ipr.c 	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
ioa_cfg          7884 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          7886 drivers/scsi/ipr.c 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ioa_cfg          7887 drivers/scsi/ipr.c 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
ioa_cfg          7891 drivers/scsi/ipr.c 		ioa_cfg->dual_raid = 1;
ioa_cfg          7892 drivers/scsi/ipr.c 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
ioa_cfg          7899 drivers/scsi/ipr.c 	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
ioa_cfg          7900 drivers/scsi/ipr.c 	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
ioa_cfg          7901 drivers/scsi/ipr.c 	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
ioa_cfg          7903 drivers/scsi/ipr.c 	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
ioa_cfg          7945 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          7946 drivers/scsi/ipr.c 	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
ioa_cfg          8033 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8034 drivers/scsi/ipr.c 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
ioa_cfg          8035 drivers/scsi/ipr.c 	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
ioa_cfg          8043 drivers/scsi/ipr.c 				  (ioa_cfg->vpd_cbs_dma
ioa_cfg          8066 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8067 drivers/scsi/ipr.c 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
ioa_cfg          8068 drivers/scsi/ipr.c 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
ioa_cfg          8076 drivers/scsi/ipr.c 				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
ioa_cfg          8097 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8104 drivers/scsi/ipr.c 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
ioa_cfg          8123 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8129 drivers/scsi/ipr.c 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
ioa_cfg          8131 drivers/scsi/ipr.c 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
ioa_cfg          8133 drivers/scsi/ipr.c 	if (ipr_invalid_adapter(ioa_cfg)) {
ioa_cfg          8134 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          8138 drivers/scsi/ipr.c 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
ioa_cfg          8139 drivers/scsi/ipr.c 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
ioa_cfg          8141 drivers/scsi/ipr.c 					&ioa_cfg->hrrq->hrrq_free_q);
ioa_cfg          8149 drivers/scsi/ipr.c 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
ioa_cfg          8167 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8173 drivers/scsi/ipr.c 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
ioa_cfg          8192 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8198 drivers/scsi/ipr.c 	if (ioa_cfg->identify_hrrq_index == 0)
ioa_cfg          8199 drivers/scsi/ipr.c 		dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
ioa_cfg          8201 drivers/scsi/ipr.c 	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
ioa_cfg          8202 drivers/scsi/ipr.c 		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
ioa_cfg          8208 drivers/scsi/ipr.c 		if (ioa_cfg->sis64)
ioa_cfg          8211 drivers/scsi/ipr.c 		if (ioa_cfg->nvectors == 1)
ioa_cfg          8231 drivers/scsi/ipr.c 					ioa_cfg->identify_hrrq_index;
ioa_cfg          8233 drivers/scsi/ipr.c 		if (ioa_cfg->sis64) {
ioa_cfg          8246 drivers/scsi/ipr.c 					ioa_cfg->identify_hrrq_index;
ioa_cfg          8251 drivers/scsi/ipr.c 		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
ioa_cfg          8278 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8281 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          8283 drivers/scsi/ipr.c 	if (ioa_cfg->reset_cmd == ipr_cmd) {
ioa_cfg          8288 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          8325 drivers/scsi/ipr.c static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          8329 drivers/scsi/ipr.c 	for_each_hrrq(hrrq, ioa_cfg) {
ioa_cfg          8342 drivers/scsi/ipr.c 	ioa_cfg->identify_hrrq_index = 0;
ioa_cfg          8343 drivers/scsi/ipr.c 	if (ioa_cfg->hrrq_num == 1)
ioa_cfg          8344 drivers/scsi/ipr.c 		atomic_set(&ioa_cfg->hrrq_index, 0);
ioa_cfg          8346 drivers/scsi/ipr.c 		atomic_set(&ioa_cfg->hrrq_index, 1);
ioa_cfg          8349 drivers/scsi/ipr.c 	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
ioa_cfg          8364 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8367 drivers/scsi/ipr.c 	feedback = readl(ioa_cfg->regs.init_feedback_reg);
ioa_cfg          8382 drivers/scsi/ipr.c 		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
ioa_cfg          8383 drivers/scsi/ipr.c 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
ioa_cfg          8384 drivers/scsi/ipr.c 		stage_time = ioa_cfg->transop_timeout;
ioa_cfg          8387 drivers/scsi/ipr.c 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
ioa_cfg          8392 drivers/scsi/ipr.c 			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
ioa_cfg          8393 drivers/scsi/ipr.c 			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
ioa_cfg          8420 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8427 drivers/scsi/ipr.c 	ipr_init_ioa_mem(ioa_cfg);
ioa_cfg          8429 drivers/scsi/ipr.c 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          8430 drivers/scsi/ipr.c 		spin_lock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          8431 drivers/scsi/ipr.c 		ioa_cfg->hrrq[i].allow_interrupts = 1;
ioa_cfg          8432 drivers/scsi/ipr.c 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          8434 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          8436 drivers/scsi/ipr.c 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
ioa_cfg          8437 drivers/scsi/ipr.c 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
ioa_cfg          8440 drivers/scsi/ipr.c 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
ioa_cfg          8444 drivers/scsi/ipr.c 		       ioa_cfg->regs.clr_interrupt_mask_reg32);
ioa_cfg          8445 drivers/scsi/ipr.c 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
ioa_cfg          8450 drivers/scsi/ipr.c 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
ioa_cfg          8452 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          8455 drivers/scsi/ipr.c 		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
ioa_cfg          8457 drivers/scsi/ipr.c 		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
ioa_cfg          8459 drivers/scsi/ipr.c 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
ioa_cfg          8461 drivers/scsi/ipr.c 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
ioa_cfg          8463 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          8468 drivers/scsi/ipr.c 	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
ioa_cfg          8490 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8492 drivers/scsi/ipr.c 	if (ioa_cfg->sdt_state == GET_DUMP)
ioa_cfg          8493 drivers/scsi/ipr.c 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
ioa_cfg          8494 drivers/scsi/ipr.c 	else if (ioa_cfg->sdt_state == READ_DUMP)
ioa_cfg          8495 drivers/scsi/ipr.c 		ioa_cfg->sdt_state = ABORT_DUMP;
ioa_cfg          8497 drivers/scsi/ipr.c 	ioa_cfg->dump_timeout = 1;
ioa_cfg          8513 drivers/scsi/ipr.c static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          8515 drivers/scsi/ipr.c 	ioa_cfg->errors_logged++;
ioa_cfg          8516 drivers/scsi/ipr.c 	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
ioa_cfg          8529 drivers/scsi/ipr.c static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          8537 drivers/scsi/ipr.c 	mailbox = readl(ioa_cfg->ioa_mailbox);
ioa_cfg          8539 drivers/scsi/ipr.c 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
ioa_cfg          8540 drivers/scsi/ipr.c 		ipr_unit_check_no_data(ioa_cfg);
ioa_cfg          8545 drivers/scsi/ipr.c 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
ioa_cfg          8551 drivers/scsi/ipr.c 		ipr_unit_check_no_data(ioa_cfg);
ioa_cfg          8563 drivers/scsi/ipr.c 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
ioa_cfg          8568 drivers/scsi/ipr.c 	rc = ipr_get_ldump_data_section(ioa_cfg,
ioa_cfg          8574 drivers/scsi/ipr.c 		ipr_handle_log_data(ioa_cfg, hostrcb);
ioa_cfg          8577 drivers/scsi/ipr.c 		    ioa_cfg->sdt_state == GET_DUMP)
ioa_cfg          8578 drivers/scsi/ipr.c 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
ioa_cfg          8580 drivers/scsi/ipr.c 		ipr_unit_check_no_data(ioa_cfg);
ioa_cfg          8582 drivers/scsi/ipr.c 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
ioa_cfg          8596 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8599 drivers/scsi/ipr.c 	ioa_cfg->ioa_unit_checked = 0;
ioa_cfg          8600 drivers/scsi/ipr.c 	ipr_get_unit_check_buffer(ioa_cfg);
ioa_cfg          8610 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8614 drivers/scsi/ipr.c 	if (ioa_cfg->sdt_state != GET_DUMP)
ioa_cfg          8617 drivers/scsi/ipr.c 	if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
ioa_cfg          8618 drivers/scsi/ipr.c 	    (readl(ioa_cfg->regs.sense_interrupt_reg) &
ioa_cfg          8622 drivers/scsi/ipr.c 			dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          8625 drivers/scsi/ipr.c 		ioa_cfg->sdt_state = READ_DUMP;
ioa_cfg          8626 drivers/scsi/ipr.c 		ioa_cfg->dump_timeout = 0;
ioa_cfg          8627 drivers/scsi/ipr.c 		if (ioa_cfg->sis64)
ioa_cfg          8632 drivers/scsi/ipr.c 		schedule_work(&ioa_cfg->work_q);
ioa_cfg          8657 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8661 drivers/scsi/ipr.c 	ioa_cfg->pdev->state_saved = true;
ioa_cfg          8662 drivers/scsi/ipr.c 	pci_restore_state(ioa_cfg->pdev);
ioa_cfg          8664 drivers/scsi/ipr.c 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
ioa_cfg          8669 drivers/scsi/ipr.c 	ipr_fail_all_ops(ioa_cfg);
ioa_cfg          8671 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          8673 drivers/scsi/ipr.c 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
ioa_cfg          8674 drivers/scsi/ipr.c 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
ioa_cfg          8677 drivers/scsi/ipr.c 	if (ioa_cfg->ioa_unit_checked) {
ioa_cfg          8678 drivers/scsi/ipr.c 		if (ioa_cfg->sis64) {
ioa_cfg          8683 drivers/scsi/ipr.c 			ioa_cfg->ioa_unit_checked = 0;
ioa_cfg          8684 drivers/scsi/ipr.c 			ipr_get_unit_check_buffer(ioa_cfg);
ioa_cfg          8691 drivers/scsi/ipr.c 	if (ioa_cfg->in_ioa_bringdown) {
ioa_cfg          8693 drivers/scsi/ipr.c 	} else if (ioa_cfg->sdt_state == GET_DUMP) {
ioa_cfg          8715 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8718 drivers/scsi/ipr.c 	if (ioa_cfg->cfg_locked)
ioa_cfg          8719 drivers/scsi/ipr.c 		pci_cfg_access_unlock(ioa_cfg->pdev);
ioa_cfg          8720 drivers/scsi/ipr.c 	ioa_cfg->cfg_locked = 0;
ioa_cfg          8737 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8741 drivers/scsi/ipr.c 	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
ioa_cfg          8743 drivers/scsi/ipr.c 		       ioa_cfg->regs.set_uproc_interrupt_reg32);
ioa_cfg          8745 drivers/scsi/ipr.c 		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
ioa_cfg          8752 drivers/scsi/ipr.c 		if (ioa_cfg->cfg_locked)
ioa_cfg          8753 drivers/scsi/ipr.c 			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
ioa_cfg          8754 drivers/scsi/ipr.c 		ioa_cfg->cfg_locked = 0;
ioa_cfg          8791 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8792 drivers/scsi/ipr.c 	struct pci_dev *pdev = ioa_cfg->pdev;
ioa_cfg          8800 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          8801 drivers/scsi/ipr.c 	if (ioa_cfg->reset_cmd == ipr_cmd)
ioa_cfg          8803 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          8818 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8822 drivers/scsi/ipr.c 	queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
ioa_cfg          8839 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8842 drivers/scsi/ipr.c 	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
ioa_cfg          8843 drivers/scsi/ipr.c 		ioa_cfg->cfg_locked = 1;
ioa_cfg          8844 drivers/scsi/ipr.c 		ipr_cmd->job_step = ioa_cfg->reset;
ioa_cfg          8852 drivers/scsi/ipr.c 			ipr_cmd->job_step = ioa_cfg->reset;
ioa_cfg          8853 drivers/scsi/ipr.c 			dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          8872 drivers/scsi/ipr.c 	ipr_cmd->ioa_cfg->cfg_locked = 0;
ioa_cfg          8885 drivers/scsi/ipr.c static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          8889 drivers/scsi/ipr.c 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
ioa_cfg          8910 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8913 drivers/scsi/ipr.c 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
ioa_cfg          8938 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8943 drivers/scsi/ipr.c 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
ioa_cfg          8946 drivers/scsi/ipr.c 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
ioa_cfg          8947 drivers/scsi/ipr.c 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
ioa_cfg          8971 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          8975 drivers/scsi/ipr.c 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
ioa_cfg          8992 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          9001 drivers/scsi/ipr.c 	for_each_hrrq(hrrq, ioa_cfg) {
ioa_cfg          9005 drivers/scsi/ipr.c 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
ioa_cfg          9031 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          9035 drivers/scsi/ipr.c 	struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
ioa_cfg          9041 drivers/scsi/ipr.c 		if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
ioa_cfg          9087 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          9088 drivers/scsi/ipr.c 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
ioa_cfg          9090 drivers/scsi/ipr.c 	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
ioa_cfg          9109 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          9110 drivers/scsi/ipr.c 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
ioa_cfg          9126 drivers/scsi/ipr.c 	if (ioa_cfg->sis64)
ioa_cfg          9152 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          9161 drivers/scsi/ipr.c 			!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ioa_cfg          9171 drivers/scsi/ipr.c 		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
ioa_cfg          9199 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          9204 drivers/scsi/ipr.c 		if (ioa_cfg->reset_cmd != ipr_cmd) {
ioa_cfg          9240 drivers/scsi/ipr.c static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          9247 drivers/scsi/ipr.c 	ioa_cfg->in_reset_reload = 1;
ioa_cfg          9248 drivers/scsi/ipr.c 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          9249 drivers/scsi/ipr.c 		spin_lock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          9250 drivers/scsi/ipr.c 		ioa_cfg->hrrq[i].allow_cmds = 0;
ioa_cfg          9251 drivers/scsi/ipr.c 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          9254 drivers/scsi/ipr.c 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
ioa_cfg          9255 drivers/scsi/ipr.c 		ioa_cfg->scsi_unblock = 0;
ioa_cfg          9256 drivers/scsi/ipr.c 		ioa_cfg->scsi_blocked = 1;
ioa_cfg          9257 drivers/scsi/ipr.c 		scsi_block_requests(ioa_cfg->host);
ioa_cfg          9260 drivers/scsi/ipr.c 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg          9261 drivers/scsi/ipr.c 	ioa_cfg->reset_cmd = ipr_cmd;
ioa_cfg          9280 drivers/scsi/ipr.c static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          9285 drivers/scsi/ipr.c 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
ioa_cfg          9288 drivers/scsi/ipr.c 	if (ioa_cfg->in_reset_reload) {
ioa_cfg          9289 drivers/scsi/ipr.c 		if (ioa_cfg->sdt_state == GET_DUMP)
ioa_cfg          9290 drivers/scsi/ipr.c 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
ioa_cfg          9291 drivers/scsi/ipr.c 		else if (ioa_cfg->sdt_state == READ_DUMP)
ioa_cfg          9292 drivers/scsi/ipr.c 			ioa_cfg->sdt_state = ABORT_DUMP;
ioa_cfg          9295 drivers/scsi/ipr.c 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
ioa_cfg          9296 drivers/scsi/ipr.c 		dev_err(&ioa_cfg->pdev->dev,
ioa_cfg          9299 drivers/scsi/ipr.c 		ioa_cfg->reset_retries = 0;
ioa_cfg          9300 drivers/scsi/ipr.c 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          9301 drivers/scsi/ipr.c 			spin_lock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          9302 drivers/scsi/ipr.c 			ioa_cfg->hrrq[i].ioa_is_dead = 1;
ioa_cfg          9303 drivers/scsi/ipr.c 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          9307 drivers/scsi/ipr.c 		if (ioa_cfg->in_ioa_bringdown) {
ioa_cfg          9308 drivers/scsi/ipr.c 			ioa_cfg->reset_cmd = NULL;
ioa_cfg          9309 drivers/scsi/ipr.c 			ioa_cfg->in_reset_reload = 0;
ioa_cfg          9310 drivers/scsi/ipr.c 			ipr_fail_all_ops(ioa_cfg);
ioa_cfg          9311 drivers/scsi/ipr.c 			wake_up_all(&ioa_cfg->reset_wait_q);
ioa_cfg          9313 drivers/scsi/ipr.c 			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
ioa_cfg          9314 drivers/scsi/ipr.c 				ioa_cfg->scsi_unblock = 1;
ioa_cfg          9315 drivers/scsi/ipr.c 				schedule_work(&ioa_cfg->work_q);
ioa_cfg          9319 drivers/scsi/ipr.c 			ioa_cfg->in_ioa_bringdown = 1;
ioa_cfg          9324 drivers/scsi/ipr.c 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
ioa_cfg          9338 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ioa_cfg          9342 drivers/scsi/ipr.c 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          9343 drivers/scsi/ipr.c 		spin_lock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          9344 drivers/scsi/ipr.c 		ioa_cfg->hrrq[i].allow_interrupts = 0;
ioa_cfg          9345 drivers/scsi/ipr.c 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          9363 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
ioa_cfg          9365 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
ioa_cfg          9366 drivers/scsi/ipr.c 	if (!ioa_cfg->probe_done)
ioa_cfg          9368 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
ioa_cfg          9383 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
ioa_cfg          9385 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
ioa_cfg          9386 drivers/scsi/ipr.c 	if (ioa_cfg->probe_done)
ioa_cfg          9387 drivers/scsi/ipr.c 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
ioa_cfg          9388 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
ioa_cfg          9402 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
ioa_cfg          9404 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
ioa_cfg          9405 drivers/scsi/ipr.c 	if (ioa_cfg->probe_done) {
ioa_cfg          9406 drivers/scsi/ipr.c 		if (ioa_cfg->needs_warm_reset)
ioa_cfg          9407 drivers/scsi/ipr.c 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
ioa_cfg          9409 drivers/scsi/ipr.c 			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
ioa_cfg          9412 drivers/scsi/ipr.c 		wake_up_all(&ioa_cfg->eeh_wait_q);
ioa_cfg          9413 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
ioa_cfg          9427 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
ioa_cfg          9430 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
ioa_cfg          9431 drivers/scsi/ipr.c 	if (ioa_cfg->probe_done) {
ioa_cfg          9432 drivers/scsi/ipr.c 		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
ioa_cfg          9433 drivers/scsi/ipr.c 			ioa_cfg->sdt_state = ABORT_DUMP;
ioa_cfg          9434 drivers/scsi/ipr.c 		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
ioa_cfg          9435 drivers/scsi/ipr.c 		ioa_cfg->in_ioa_bringdown = 1;
ioa_cfg          9436 drivers/scsi/ipr.c 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          9437 drivers/scsi/ipr.c 			spin_lock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          9438 drivers/scsi/ipr.c 			ioa_cfg->hrrq[i].allow_cmds = 0;
ioa_cfg          9439 drivers/scsi/ipr.c 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          9442 drivers/scsi/ipr.c 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
ioa_cfg          9444 drivers/scsi/ipr.c 		wake_up_all(&ioa_cfg->eeh_wait_q);
ioa_cfg          9445 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
ioa_cfg          9486 drivers/scsi/ipr.c static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          9492 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
ioa_cfg          9493 drivers/scsi/ipr.c 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
ioa_cfg          9494 drivers/scsi/ipr.c 	ioa_cfg->probe_done = 1;
ioa_cfg          9495 drivers/scsi/ipr.c 	if (ioa_cfg->needs_hard_reset) {
ioa_cfg          9496 drivers/scsi/ipr.c 		ioa_cfg->needs_hard_reset = 0;
ioa_cfg          9497 drivers/scsi/ipr.c 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
ioa_cfg          9499 drivers/scsi/ipr.c 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
ioa_cfg          9501 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
ioa_cfg          9514 drivers/scsi/ipr.c static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          9518 drivers/scsi/ipr.c 	if (ioa_cfg->ipr_cmnd_list) {
ioa_cfg          9520 drivers/scsi/ipr.c 			if (ioa_cfg->ipr_cmnd_list[i])
ioa_cfg          9521 drivers/scsi/ipr.c 				dma_pool_free(ioa_cfg->ipr_cmd_pool,
ioa_cfg          9522 drivers/scsi/ipr.c 					      ioa_cfg->ipr_cmnd_list[i],
ioa_cfg          9523 drivers/scsi/ipr.c 					      ioa_cfg->ipr_cmnd_list_dma[i]);
ioa_cfg          9525 drivers/scsi/ipr.c 			ioa_cfg->ipr_cmnd_list[i] = NULL;
ioa_cfg          9529 drivers/scsi/ipr.c 	if (ioa_cfg->ipr_cmd_pool)
ioa_cfg          9530 drivers/scsi/ipr.c 		dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
ioa_cfg          9532 drivers/scsi/ipr.c 	kfree(ioa_cfg->ipr_cmnd_list);
ioa_cfg          9533 drivers/scsi/ipr.c 	kfree(ioa_cfg->ipr_cmnd_list_dma);
ioa_cfg          9534 drivers/scsi/ipr.c 	ioa_cfg->ipr_cmnd_list = NULL;
ioa_cfg          9535 drivers/scsi/ipr.c 	ioa_cfg->ipr_cmnd_list_dma = NULL;
ioa_cfg          9536 drivers/scsi/ipr.c 	ioa_cfg->ipr_cmd_pool = NULL;
ioa_cfg          9546 drivers/scsi/ipr.c static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          9550 drivers/scsi/ipr.c 	kfree(ioa_cfg->res_entries);
ioa_cfg          9551 drivers/scsi/ipr.c 	dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
ioa_cfg          9552 drivers/scsi/ipr.c 			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
ioa_cfg          9553 drivers/scsi/ipr.c 	ipr_free_cmd_blks(ioa_cfg);
ioa_cfg          9555 drivers/scsi/ipr.c 	for (i = 0; i < ioa_cfg->hrrq_num; i++)
ioa_cfg          9556 drivers/scsi/ipr.c 		dma_free_coherent(&ioa_cfg->pdev->dev,
ioa_cfg          9557 drivers/scsi/ipr.c 				  sizeof(u32) * ioa_cfg->hrrq[i].size,
ioa_cfg          9558 drivers/scsi/ipr.c 				  ioa_cfg->hrrq[i].host_rrq,
ioa_cfg          9559 drivers/scsi/ipr.c 				  ioa_cfg->hrrq[i].host_rrq_dma);
ioa_cfg          9561 drivers/scsi/ipr.c 	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
ioa_cfg          9562 drivers/scsi/ipr.c 			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
ioa_cfg          9565 drivers/scsi/ipr.c 		dma_free_coherent(&ioa_cfg->pdev->dev,
ioa_cfg          9567 drivers/scsi/ipr.c 				  ioa_cfg->hostrcb[i],
ioa_cfg          9568 drivers/scsi/ipr.c 				  ioa_cfg->hostrcb_dma[i]);
ioa_cfg          9571 drivers/scsi/ipr.c 	ipr_free_dump(ioa_cfg);
ioa_cfg          9572 drivers/scsi/ipr.c 	kfree(ioa_cfg->trace);
ioa_cfg          9585 drivers/scsi/ipr.c static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          9587 drivers/scsi/ipr.c 	struct pci_dev *pdev = ioa_cfg->pdev;
ioa_cfg          9590 drivers/scsi/ipr.c 	for (i = 0; i < ioa_cfg->nvectors; i++)
ioa_cfg          9591 drivers/scsi/ipr.c 		free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
ioa_cfg          9605 drivers/scsi/ipr.c static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          9607 drivers/scsi/ipr.c 	struct pci_dev *pdev = ioa_cfg->pdev;
ioa_cfg          9610 drivers/scsi/ipr.c 	ipr_free_irqs(ioa_cfg);
ioa_cfg          9611 drivers/scsi/ipr.c 	if (ioa_cfg->reset_work_q)
ioa_cfg          9612 drivers/scsi/ipr.c 		destroy_workqueue(ioa_cfg->reset_work_q);
ioa_cfg          9613 drivers/scsi/ipr.c 	iounmap(ioa_cfg->hdw_dma_regs);
ioa_cfg          9615 drivers/scsi/ipr.c 	ipr_free_mem(ioa_cfg);
ioa_cfg          9616 drivers/scsi/ipr.c 	scsi_host_put(ioa_cfg->host);
ioa_cfg          9628 drivers/scsi/ipr.c static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          9635 drivers/scsi/ipr.c 	ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
ioa_cfg          9638 drivers/scsi/ipr.c 	if (!ioa_cfg->ipr_cmd_pool)
ioa_cfg          9641 drivers/scsi/ipr.c 	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
ioa_cfg          9642 drivers/scsi/ipr.c 	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
ioa_cfg          9644 drivers/scsi/ipr.c 	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
ioa_cfg          9645 drivers/scsi/ipr.c 		ipr_free_cmd_blks(ioa_cfg);
ioa_cfg          9649 drivers/scsi/ipr.c 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          9650 drivers/scsi/ipr.c 		if (ioa_cfg->hrrq_num > 1) {
ioa_cfg          9653 drivers/scsi/ipr.c 				ioa_cfg->hrrq[i].min_cmd_id = 0;
ioa_cfg          9654 drivers/scsi/ipr.c 				ioa_cfg->hrrq[i].max_cmd_id =
ioa_cfg          9659 drivers/scsi/ipr.c 					(ioa_cfg->hrrq_num - 1);
ioa_cfg          9660 drivers/scsi/ipr.c 				ioa_cfg->hrrq[i].min_cmd_id =
ioa_cfg          9663 drivers/scsi/ipr.c 				ioa_cfg->hrrq[i].max_cmd_id =
ioa_cfg          9669 drivers/scsi/ipr.c 			ioa_cfg->hrrq[i].min_cmd_id = 0;
ioa_cfg          9670 drivers/scsi/ipr.c 			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
ioa_cfg          9672 drivers/scsi/ipr.c 		ioa_cfg->hrrq[i].size = entries_each_hrrq;
ioa_cfg          9675 drivers/scsi/ipr.c 	BUG_ON(ioa_cfg->hrrq_num == 0);
ioa_cfg          9678 drivers/scsi/ipr.c 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
ioa_cfg          9680 drivers/scsi/ipr.c 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
ioa_cfg          9681 drivers/scsi/ipr.c 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
ioa_cfg          9685 drivers/scsi/ipr.c 		ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
ioa_cfg          9689 drivers/scsi/ipr.c 			ipr_free_cmd_blks(ioa_cfg);
ioa_cfg          9693 drivers/scsi/ipr.c 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
ioa_cfg          9694 drivers/scsi/ipr.c 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
ioa_cfg          9698 drivers/scsi/ipr.c 		if (ioa_cfg->sis64)
ioa_cfg          9704 drivers/scsi/ipr.c 		if (ioa_cfg->sis64) {
ioa_cfg          9718 drivers/scsi/ipr.c 		ipr_cmd->ioa_cfg = ioa_cfg;
ioa_cfg          9723 drivers/scsi/ipr.c 		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
ioa_cfg          9725 drivers/scsi/ipr.c 		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
ioa_cfg          9739 drivers/scsi/ipr.c static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          9741 drivers/scsi/ipr.c 	struct pci_dev *pdev = ioa_cfg->pdev;
ioa_cfg          9745 drivers/scsi/ipr.c 	ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
ioa_cfg          9749 drivers/scsi/ipr.c 	if (!ioa_cfg->res_entries)
ioa_cfg          9752 drivers/scsi/ipr.c 	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
ioa_cfg          9753 drivers/scsi/ipr.c 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
ioa_cfg          9754 drivers/scsi/ipr.c 		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
ioa_cfg          9757 drivers/scsi/ipr.c 	ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
ioa_cfg          9759 drivers/scsi/ipr.c 					      &ioa_cfg->vpd_cbs_dma,
ioa_cfg          9762 drivers/scsi/ipr.c 	if (!ioa_cfg->vpd_cbs)
ioa_cfg          9765 drivers/scsi/ipr.c 	if (ipr_alloc_cmd_blks(ioa_cfg))
ioa_cfg          9768 drivers/scsi/ipr.c 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          9769 drivers/scsi/ipr.c 		ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
ioa_cfg          9770 drivers/scsi/ipr.c 					sizeof(u32) * ioa_cfg->hrrq[i].size,
ioa_cfg          9771 drivers/scsi/ipr.c 					&ioa_cfg->hrrq[i].host_rrq_dma,
ioa_cfg          9774 drivers/scsi/ipr.c 		if (!ioa_cfg->hrrq[i].host_rrq)  {
ioa_cfg          9777 drivers/scsi/ipr.c 					sizeof(u32) * ioa_cfg->hrrq[i].size,
ioa_cfg          9778 drivers/scsi/ipr.c 					ioa_cfg->hrrq[i].host_rrq,
ioa_cfg          9779 drivers/scsi/ipr.c 					ioa_cfg->hrrq[i].host_rrq_dma);
ioa_cfg          9782 drivers/scsi/ipr.c 		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
ioa_cfg          9785 drivers/scsi/ipr.c 	ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
ioa_cfg          9786 drivers/scsi/ipr.c 						  ioa_cfg->cfg_table_size,
ioa_cfg          9787 drivers/scsi/ipr.c 						  &ioa_cfg->cfg_table_dma,
ioa_cfg          9790 drivers/scsi/ipr.c 	if (!ioa_cfg->u.cfg_table)
ioa_cfg          9794 drivers/scsi/ipr.c 		ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
ioa_cfg          9796 drivers/scsi/ipr.c 							 &ioa_cfg->hostrcb_dma[i],
ioa_cfg          9799 drivers/scsi/ipr.c 		if (!ioa_cfg->hostrcb[i])
ioa_cfg          9802 drivers/scsi/ipr.c 		ioa_cfg->hostrcb[i]->hostrcb_dma =
ioa_cfg          9803 drivers/scsi/ipr.c 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
ioa_cfg          9804 drivers/scsi/ipr.c 		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
ioa_cfg          9805 drivers/scsi/ipr.c 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
ioa_cfg          9808 drivers/scsi/ipr.c 	ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
ioa_cfg          9812 drivers/scsi/ipr.c 	if (!ioa_cfg->trace)
ioa_cfg          9823 drivers/scsi/ipr.c 				  ioa_cfg->hostrcb[i],
ioa_cfg          9824 drivers/scsi/ipr.c 				  ioa_cfg->hostrcb_dma[i]);
ioa_cfg          9826 drivers/scsi/ipr.c 	dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
ioa_cfg          9827 drivers/scsi/ipr.c 			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
ioa_cfg          9829 drivers/scsi/ipr.c 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          9831 drivers/scsi/ipr.c 				  sizeof(u32) * ioa_cfg->hrrq[i].size,
ioa_cfg          9832 drivers/scsi/ipr.c 				  ioa_cfg->hrrq[i].host_rrq,
ioa_cfg          9833 drivers/scsi/ipr.c 				  ioa_cfg->hrrq[i].host_rrq_dma);
ioa_cfg          9836 drivers/scsi/ipr.c 	ipr_free_cmd_blks(ioa_cfg);
ioa_cfg          9839 drivers/scsi/ipr.c 			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
ioa_cfg          9841 drivers/scsi/ipr.c 	kfree(ioa_cfg->res_entries);
ioa_cfg          9852 drivers/scsi/ipr.c static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          9857 drivers/scsi/ipr.c 		ioa_cfg->bus_attr[i].bus = i;
ioa_cfg          9858 drivers/scsi/ipr.c 		ioa_cfg->bus_attr[i].qas_enabled = 0;
ioa_cfg          9859 drivers/scsi/ipr.c 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
ioa_cfg          9861 drivers/scsi/ipr.c 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
ioa_cfg          9863 drivers/scsi/ipr.c 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
ioa_cfg          9874 drivers/scsi/ipr.c static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          9880 drivers/scsi/ipr.c 	p = &ioa_cfg->chip_cfg->regs;
ioa_cfg          9881 drivers/scsi/ipr.c 	t = &ioa_cfg->regs;
ioa_cfg          9882 drivers/scsi/ipr.c 	base = ioa_cfg->hdw_dma_regs;
ioa_cfg          9901 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          9918 drivers/scsi/ipr.c static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          9923 drivers/scsi/ipr.c 	ioa_cfg->host = host;
ioa_cfg          9924 drivers/scsi/ipr.c 	ioa_cfg->pdev = pdev;
ioa_cfg          9925 drivers/scsi/ipr.c 	ioa_cfg->log_level = ipr_log_level;
ioa_cfg          9926 drivers/scsi/ipr.c 	ioa_cfg->doorbell = IPR_DOORBELL;
ioa_cfg          9927 drivers/scsi/ipr.c 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
ioa_cfg          9928 drivers/scsi/ipr.c 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
ioa_cfg          9929 drivers/scsi/ipr.c 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
ioa_cfg          9930 drivers/scsi/ipr.c 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
ioa_cfg          9931 drivers/scsi/ipr.c 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
ioa_cfg          9932 drivers/scsi/ipr.c 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
ioa_cfg          9934 drivers/scsi/ipr.c 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
ioa_cfg          9935 drivers/scsi/ipr.c 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
ioa_cfg          9936 drivers/scsi/ipr.c 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
ioa_cfg          9937 drivers/scsi/ipr.c 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
ioa_cfg          9938 drivers/scsi/ipr.c 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
ioa_cfg          9939 drivers/scsi/ipr.c 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
ioa_cfg          9940 drivers/scsi/ipr.c 	INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
ioa_cfg          9941 drivers/scsi/ipr.c 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
ioa_cfg          9942 drivers/scsi/ipr.c 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
ioa_cfg          9943 drivers/scsi/ipr.c 	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
ioa_cfg          9944 drivers/scsi/ipr.c 	ioa_cfg->sdt_state = INACTIVE;
ioa_cfg          9946 drivers/scsi/ipr.c 	ipr_initialize_bus_attr(ioa_cfg);
ioa_cfg          9947 drivers/scsi/ipr.c 	ioa_cfg->max_devs_supported = ipr_max_devs;
ioa_cfg          9949 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          9954 drivers/scsi/ipr.c 			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
ioa_cfg          9955 drivers/scsi/ipr.c 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
ioa_cfg          9957 drivers/scsi/ipr.c 					       * ioa_cfg->max_devs_supported)));
ioa_cfg          9963 drivers/scsi/ipr.c 			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
ioa_cfg          9964 drivers/scsi/ipr.c 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
ioa_cfg          9966 drivers/scsi/ipr.c 					       * ioa_cfg->max_devs_supported)));
ioa_cfg          9971 drivers/scsi/ipr.c 	host->can_queue = ioa_cfg->max_cmds;
ioa_cfg          9972 drivers/scsi/ipr.c 	pci_set_drvdata(pdev, ioa_cfg);
ioa_cfg          9974 drivers/scsi/ipr.c 	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
ioa_cfg          9975 drivers/scsi/ipr.c 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
ioa_cfg          9976 drivers/scsi/ipr.c 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
ioa_cfg          9977 drivers/scsi/ipr.c 		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          9979 drivers/scsi/ipr.c 			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
ioa_cfg          9981 drivers/scsi/ipr.c 			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
ioa_cfg          10012 drivers/scsi/ipr.c static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          10014 drivers/scsi/ipr.c 	struct pci_dev *pdev = ioa_cfg->pdev;
ioa_cfg          10017 drivers/scsi/ipr.c 		wait_event_timeout(ioa_cfg->eeh_wait_q,
ioa_cfg          10024 drivers/scsi/ipr.c static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg          10026 drivers/scsi/ipr.c 	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
ioa_cfg          10028 drivers/scsi/ipr.c 	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
ioa_cfg          10029 drivers/scsi/ipr.c 		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
ioa_cfg          10030 drivers/scsi/ipr.c 			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
ioa_cfg          10031 drivers/scsi/ipr.c 		ioa_cfg->vectors_info[vec_idx].
ioa_cfg          10032 drivers/scsi/ipr.c 			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
ioa_cfg          10036 drivers/scsi/ipr.c static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          10041 drivers/scsi/ipr.c 	for (i = 1; i < ioa_cfg->nvectors; i++) {
ioa_cfg          10045 drivers/scsi/ipr.c 			ioa_cfg->vectors_info[i].desc,
ioa_cfg          10046 drivers/scsi/ipr.c 			&ioa_cfg->hrrq[i]);
ioa_cfg          10050 drivers/scsi/ipr.c 					&ioa_cfg->hrrq[i]);
ioa_cfg          10069 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
ioa_cfg          10073 drivers/scsi/ipr.c 	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
ioa_cfg          10074 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          10076 drivers/scsi/ipr.c 	ioa_cfg->msi_received = 1;
ioa_cfg          10077 drivers/scsi/ipr.c 	wake_up(&ioa_cfg->msi_wait_q);
ioa_cfg          10079 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          10094 drivers/scsi/ipr.c static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
ioa_cfg          10103 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          10104 drivers/scsi/ipr.c 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
ioa_cfg          10105 drivers/scsi/ipr.c 	ioa_cfg->msi_received = 0;
ioa_cfg          10106 drivers/scsi/ipr.c 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
ioa_cfg          10107 drivers/scsi/ipr.c 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
ioa_cfg          10108 drivers/scsi/ipr.c 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
ioa_cfg          10109 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          10111 drivers/scsi/ipr.c 	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
ioa_cfg          10118 drivers/scsi/ipr.c 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
ioa_cfg          10119 drivers/scsi/ipr.c 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
ioa_cfg          10120 drivers/scsi/ipr.c 	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
ioa_cfg          10121 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          10122 drivers/scsi/ipr.c 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
ioa_cfg          10124 drivers/scsi/ipr.c 	if (!ioa_cfg->msi_received) {
ioa_cfg          10131 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          10133 drivers/scsi/ipr.c 	free_irq(irq, ioa_cfg);
ioa_cfg          10150 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          10162 drivers/scsi/ipr.c 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
ioa_cfg          10170 drivers/scsi/ipr.c 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
ioa_cfg          10171 drivers/scsi/ipr.c 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
ioa_cfg          10172 drivers/scsi/ipr.c 	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
ioa_cfg          10174 drivers/scsi/ipr.c 	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
ioa_cfg          10176 drivers/scsi/ipr.c 	if (!ioa_cfg->ipr_chip) {
ioa_cfg          10183 drivers/scsi/ipr.c 	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
ioa_cfg          10184 drivers/scsi/ipr.c 	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
ioa_cfg          10185 drivers/scsi/ipr.c 	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
ioa_cfg          10186 drivers/scsi/ipr.c 	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
ioa_cfg          10189 drivers/scsi/ipr.c 		ioa_cfg->transop_timeout = ipr_transop_timeout;
ioa_cfg          10191 drivers/scsi/ipr.c 		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
ioa_cfg          10193 drivers/scsi/ipr.c 		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
ioa_cfg          10195 drivers/scsi/ipr.c 	ioa_cfg->revid = pdev->revision;
ioa_cfg          10197 drivers/scsi/ipr.c 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
ioa_cfg          10212 drivers/scsi/ipr.c 			ipr_wait_for_pci_err_recovery(ioa_cfg);
ioa_cfg          10218 drivers/scsi/ipr.c 			ipr_wait_for_pci_err_recovery(ioa_cfg);
ioa_cfg          10232 drivers/scsi/ipr.c 	ioa_cfg->hdw_dma_regs = ipr_regs;
ioa_cfg          10233 drivers/scsi/ipr.c 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
ioa_cfg          10234 drivers/scsi/ipr.c 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
ioa_cfg          10236 drivers/scsi/ipr.c 	ipr_init_regs(ioa_cfg);
ioa_cfg          10238 drivers/scsi/ipr.c 	if (ioa_cfg->sis64) {
ioa_cfg          10254 drivers/scsi/ipr.c 				   ioa_cfg->chip_cfg->cache_line_size);
ioa_cfg          10258 drivers/scsi/ipr.c 		ipr_wait_for_pci_err_recovery(ioa_cfg);
ioa_cfg          10264 drivers/scsi/ipr.c 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
ioa_cfg          10265 drivers/scsi/ipr.c 	ipr_wait_for_pci_err_recovery(ioa_cfg);
ioa_cfg          10274 drivers/scsi/ipr.c 	if (ioa_cfg->ipr_chip->has_msi)
ioa_cfg          10278 drivers/scsi/ipr.c 		ipr_wait_for_pci_err_recovery(ioa_cfg);
ioa_cfg          10281 drivers/scsi/ipr.c 	ioa_cfg->nvectors = rc;
ioa_cfg          10284 drivers/scsi/ipr.c 		ioa_cfg->clear_isr = 1;
ioa_cfg          10289 drivers/scsi/ipr.c 		ipr_wait_for_pci_err_recovery(ioa_cfg);
ioa_cfg          10298 drivers/scsi/ipr.c 		rc = ipr_test_msi(ioa_cfg, pdev);
ioa_cfg          10302 drivers/scsi/ipr.c 				"Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
ioa_cfg          10306 drivers/scsi/ipr.c 			ipr_wait_for_pci_err_recovery(ioa_cfg);
ioa_cfg          10309 drivers/scsi/ipr.c 			ioa_cfg->nvectors = 1;
ioa_cfg          10310 drivers/scsi/ipr.c 			ioa_cfg->clear_isr = 1;
ioa_cfg          10317 drivers/scsi/ipr.c 	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
ioa_cfg          10321 drivers/scsi/ipr.c 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
ioa_cfg          10324 drivers/scsi/ipr.c 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
ioa_cfg          10327 drivers/scsi/ipr.c 	rc = ipr_alloc_mem(ioa_cfg);
ioa_cfg          10347 drivers/scsi/ipr.c 	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
ioa_cfg          10348 drivers/scsi/ipr.c 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
ioa_cfg          10349 drivers/scsi/ipr.c 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
ioa_cfg          10351 drivers/scsi/ipr.c 		ioa_cfg->needs_hard_reset = 1;
ioa_cfg          10353 drivers/scsi/ipr.c 		ioa_cfg->needs_hard_reset = 1;
ioa_cfg          10355 drivers/scsi/ipr.c 		ioa_cfg->ioa_unit_checked = 1;
ioa_cfg          10357 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          10358 drivers/scsi/ipr.c 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
ioa_cfg          10359 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          10362 drivers/scsi/ipr.c 		name_msi_vectors(ioa_cfg);
ioa_cfg          10364 drivers/scsi/ipr.c 			ioa_cfg->vectors_info[0].desc,
ioa_cfg          10365 drivers/scsi/ipr.c 			&ioa_cfg->hrrq[0]);
ioa_cfg          10367 drivers/scsi/ipr.c 			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
ioa_cfg          10371 drivers/scsi/ipr.c 			 IPR_NAME, &ioa_cfg->hrrq[0]);
ioa_cfg          10380 drivers/scsi/ipr.c 	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
ioa_cfg          10381 drivers/scsi/ipr.c 		ioa_cfg->needs_warm_reset = 1;
ioa_cfg          10382 drivers/scsi/ipr.c 		ioa_cfg->reset = ipr_reset_slot_reset;
ioa_cfg          10384 drivers/scsi/ipr.c 		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
ioa_cfg          10387 drivers/scsi/ipr.c 		if (!ioa_cfg->reset_work_q) {
ioa_cfg          10393 drivers/scsi/ipr.c 		ioa_cfg->reset = ipr_reset_start_bist;
ioa_cfg          10396 drivers/scsi/ipr.c 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
ioa_cfg          10404 drivers/scsi/ipr.c 	ipr_free_irqs(ioa_cfg);
ioa_cfg          10406 drivers/scsi/ipr.c 	ipr_free_mem(ioa_cfg);
ioa_cfg          10408 drivers/scsi/ipr.c 	ipr_wait_for_pci_err_recovery(ioa_cfg);
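The probe fragments above end in the usual goto-based unwind: IRQs are released before the command memory, and PCI error recovery is waited out before the interrupt setup is torn down. A minimal standalone sketch of that reverse-order cleanup pattern follows; the stub names and labels are illustrative only and are not the driver's actual helpers:

	#include <stdio.h>

	/* Stub acquire/release pairs standing in for the driver's real
	 * setup steps; each setup stub returns 0 on success. */
	static int enable_msi(void)   { return 0; }
	static void disable_msi(void) { puts("msi disabled"); }
	static int alloc_mem(void)    { return 0; }
	static void free_mem(void)    { puts("mem freed"); }
	static int request_irqs(void) { return 0; }

	static int probe_like(void)
	{
		int rc;

		if ((rc = enable_msi()))
			goto out;
		if ((rc = alloc_mem()))
			goto out_msi_disable;
		if ((rc = request_irqs()))
			goto out_free_mem;
		return 0;

		/* Unwind strictly in reverse order of acquisition. */
	out_free_mem:
		free_mem();
	out_msi_disable:
		disable_msi();
	out:
		return rc;
	}

	int main(void)
	{
		return probe_like();
	}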
ioa_cfg          10435 drivers/scsi/ipr.c static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg          10439 drivers/scsi/ipr.c 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
ioa_cfg          10440 drivers/scsi/ipr.c 		ioa_cfg->sdt_state = ABORT_DUMP;
ioa_cfg          10441 drivers/scsi/ipr.c 	ioa_cfg->reset_retries = 0;
ioa_cfg          10442 drivers/scsi/ipr.c 	ioa_cfg->in_ioa_bringdown = 1;
ioa_cfg          10443 drivers/scsi/ipr.c 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
ioa_cfg          10459 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
ioa_cfg          10464 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
ioa_cfg          10465 drivers/scsi/ipr.c 	while (ioa_cfg->in_reset_reload) {
ioa_cfg          10466 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
ioa_cfg          10467 drivers/scsi/ipr.c 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          10468 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
ioa_cfg          10471 drivers/scsi/ipr.c 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          10472 drivers/scsi/ipr.c 		spin_lock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          10473 drivers/scsi/ipr.c 		ioa_cfg->hrrq[i].removing_ioa = 1;
ioa_cfg          10474 drivers/scsi/ipr.c 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg          10477 drivers/scsi/ipr.c 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
ioa_cfg          10479 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
ioa_cfg          10480 drivers/scsi/ipr.c 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          10481 drivers/scsi/ipr.c 	flush_work(&ioa_cfg->work_q);
ioa_cfg          10482 drivers/scsi/ipr.c 	if (ioa_cfg->reset_work_q)
ioa_cfg          10483 drivers/scsi/ipr.c 		flush_workqueue(ioa_cfg->reset_work_q);
ioa_cfg          10484 drivers/scsi/ipr.c 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
ioa_cfg          10485 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
ioa_cfg          10488 drivers/scsi/ipr.c 	list_del(&ioa_cfg->queue);
ioa_cfg          10491 drivers/scsi/ipr.c 	if (ioa_cfg->sdt_state == ABORT_DUMP)
ioa_cfg          10492 drivers/scsi/ipr.c 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
ioa_cfg          10493 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
ioa_cfg          10495 drivers/scsi/ipr.c 	ipr_free_all_resources(ioa_cfg);
ioa_cfg          10511 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
ioa_cfg          10515 drivers/scsi/ipr.c 	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
ioa_cfg          10517 drivers/scsi/ipr.c 	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
ioa_cfg          10519 drivers/scsi/ipr.c 	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
ioa_cfg          10521 drivers/scsi/ipr.c 	scsi_remove_host(ioa_cfg->host);
ioa_cfg          10536 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          10545 drivers/scsi/ipr.c 	ioa_cfg = pci_get_drvdata(pdev);
ioa_cfg          10546 drivers/scsi/ipr.c 	rc = ipr_probe_ioa_part2(ioa_cfg);
ioa_cfg          10553 drivers/scsi/ipr.c 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
ioa_cfg          10560 drivers/scsi/ipr.c 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
ioa_cfg          10564 drivers/scsi/ipr.c 		scsi_remove_host(ioa_cfg->host);
ioa_cfg          10569 drivers/scsi/ipr.c 	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
ioa_cfg          10573 drivers/scsi/ipr.c 		ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
ioa_cfg          10575 drivers/scsi/ipr.c 		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
ioa_cfg          10577 drivers/scsi/ipr.c 		scsi_remove_host(ioa_cfg->host);
ioa_cfg          10582 drivers/scsi/ipr.c 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
ioa_cfg          10586 drivers/scsi/ipr.c 		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
ioa_cfg          10588 drivers/scsi/ipr.c 		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
ioa_cfg          10590 drivers/scsi/ipr.c 		scsi_remove_host(ioa_cfg->host);
ioa_cfg          10594 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
ioa_cfg          10595 drivers/scsi/ipr.c 	ioa_cfg->scan_enabled = 1;
ioa_cfg          10596 drivers/scsi/ipr.c 	schedule_work(&ioa_cfg->work_q);
ioa_cfg          10597 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
ioa_cfg          10599 drivers/scsi/ipr.c 	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
ioa_cfg          10601 drivers/scsi/ipr.c 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
ioa_cfg          10602 drivers/scsi/ipr.c 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
ioa_cfg          10603 drivers/scsi/ipr.c 			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
ioa_cfg          10604 drivers/scsi/ipr.c 					ioa_cfg->iopoll_weight, ipr_iopoll);
ioa_cfg          10608 drivers/scsi/ipr.c 	scsi_scan_host(ioa_cfg->host);
ioa_cfg          10625 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
ioa_cfg          10630 drivers/scsi/ipr.c 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          10631 drivers/scsi/ipr.c 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
ioa_cfg          10632 drivers/scsi/ipr.c 		ioa_cfg->iopoll_weight = 0;
ioa_cfg          10633 drivers/scsi/ipr.c 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
ioa_cfg          10634 drivers/scsi/ipr.c 			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
ioa_cfg          10637 drivers/scsi/ipr.c 	while (ioa_cfg->in_reset_reload) {
ioa_cfg          10638 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          10639 drivers/scsi/ipr.c 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          10640 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          10643 drivers/scsi/ipr.c 	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
ioa_cfg          10646 drivers/scsi/ipr.c 	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
ioa_cfg          10647 drivers/scsi/ipr.c 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg          10648 drivers/scsi/ipr.c 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
ioa_cfg          10649 drivers/scsi/ipr.c 	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
ioa_cfg          10650 drivers/scsi/ipr.c 		ipr_free_irqs(ioa_cfg);
ioa_cfg          10651 drivers/scsi/ipr.c 		pci_disable_device(ioa_cfg->pdev);
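Both the remove path and the shutdown path above repeat the same idiom: while in_reset_reload is set, the host lock is dropped, the caller sleeps on reset_wait_q, and the lock is retaken before the flag is rechecked. A rough userspace analogue of that wait loop, assuming pthreads (the condition variable stands in for wait_event and the mutex for the host spinlock; none of these names come from ipr.c):

	#include <pthread.h>
	#include <stdbool.h>

	struct toy_ctrl {
		pthread_mutex_t host_lock;
		pthread_cond_t reset_wait_q;
		bool in_reset_reload;
	};

	/* Sleep until an in-flight reset/reload completes.  pthread_cond_wait()
	 * atomically releases host_lock while sleeping and reacquires it before
	 * returning, which is what the unlock/wait_event/relock sequence does
	 * by hand in the driver. */
	static void toy_wait_for_reset(struct toy_ctrl *c)
	{
		pthread_mutex_lock(&c->host_lock);
		while (c->in_reset_reload)
			pthread_cond_wait(&c->reset_wait_q, &c->host_lock);
		pthread_mutex_unlock(&c->host_lock);
	}

	/* Reset side: clear the flag and wake every waiter. */
	static void toy_reset_done(struct toy_ctrl *c)
	{
		pthread_mutex_lock(&c->host_lock);
		c->in_reset_reload = false;
		pthread_cond_broadcast(&c->reset_wait_q);
		pthread_mutex_unlock(&c->host_lock);
	}

	int main(void)
	{
		struct toy_ctrl c = {
			.host_lock = PTHREAD_MUTEX_INITIALIZER,
			.reset_wait_q = PTHREAD_COND_INITIALIZER,
			.in_reset_reload = false,
		};

		toy_wait_for_reset(&c);	/* returns at once: no reset in flight */
		toy_reset_done(&c);
		return 0;
	}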
ioa_cfg          10804 drivers/scsi/ipr.c 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          10812 drivers/scsi/ipr.c 	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
ioa_cfg          10813 drivers/scsi/ipr.c 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
ioa_cfg          10814 drivers/scsi/ipr.c 		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
ioa_cfg          10815 drivers/scsi/ipr.c 		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
ioa_cfg          10816 drivers/scsi/ipr.c 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
ioa_cfg          10820 drivers/scsi/ipr.c 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg          10827 drivers/scsi/ipr.c 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
ioa_cfg            44 drivers/scsi/ipr.h #define IPR_NUM_BASE_CMD_BLKS			(ioa_cfg->max_cmds)
ioa_cfg           486 drivers/scsi/ipr.h 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          1102 drivers/scsi/ipr.h #define for_each_hrrq(hrrq, ioa_cfg) \
ioa_cfg          1103 drivers/scsi/ipr.h 		for (hrrq = (ioa_cfg)->hrrq; \
ioa_cfg          1104 drivers/scsi/ipr.h 			hrrq < ((ioa_cfg)->hrrq + (ioa_cfg)->hrrq_num); hrrq++)
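The for_each_hrrq() macro above is a plain pointer walk over the hrrq array, bounded by hrrq_num. A self-contained illustration of the same macro shape, using toy types rather than the driver's structures:

	#include <stdio.h>

	struct toy_hrrq { int id; };

	struct toy_ioa_cfg {
		struct toy_hrrq hrrq[4];
		int hrrq_num;
	};

	/* Same shape as the driver's macro: start the cursor at &hrrq[0] and
	 * advance it until it reaches &hrrq[hrrq_num]. */
	#define for_each_toy_hrrq(hrrq, ioa_cfg) \
		for (hrrq = (ioa_cfg)->hrrq; \
			hrrq < ((ioa_cfg)->hrrq + (ioa_cfg)->hrrq_num); hrrq++)

	int main(void)
	{
		struct toy_ioa_cfg cfg = {
			.hrrq = { {0}, {1}, {2}, {3} },
			.hrrq_num = 3,	/* only the first three queues are active */
		};
		struct toy_hrrq *q;

		for_each_toy_hrrq(q, &cfg)
			printf("queue %d\n", q->id);
		return 0;
	}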
ioa_cfg          1235 drivers/scsi/ipr.h 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          1282 drivers/scsi/ipr.h 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          1323 drivers/scsi/ipr.h 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          1639 drivers/scsi/ipr.h 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          1725 drivers/scsi/ipr.h 	struct ipr_ioa_cfg *ioa_cfg;
ioa_cfg          1782 drivers/scsi/ipr.h #define ipr_res_printk(level, ioa_cfg, bus, target, lun, fmt, ...) \
ioa_cfg          1783 drivers/scsi/ipr.h 	printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
ioa_cfg          1786 drivers/scsi/ipr.h #define ipr_res_err(ioa_cfg, res, fmt, ...) \
ioa_cfg          1787 drivers/scsi/ipr.h 	ipr_res_printk(KERN_ERR, ioa_cfg, (res)->bus, (res)->target, (res)->lun, fmt, ##__VA_ARGS__)
ioa_cfg          1789 drivers/scsi/ipr.h #define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \
ioa_cfg          1790 drivers/scsi/ipr.h 	printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
ioa_cfg          1793 drivers/scsi/ipr.h #define ipr_ra_err(ioa_cfg, ra, fmt, ...) \
ioa_cfg          1794 drivers/scsi/ipr.h 	ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__)
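The ipr_res_err()/ipr_ra_err() definitions above are layered variadic macros: the inner macro prefixes every message with the host/bus/target/lun coordinates, the outer one fixes the severity and unpacks the resource, and ##__VA_ARGS__ forwards the caller's arguments (including an empty list) to printk. A userspace sketch of that layering, assuming printf in place of printk and toy types in place of the driver's:

	#include <stdio.h>

	struct toy_res { int bus, target, lun; };
	struct toy_cfg { int host_no; };

	/* Inner macro: prepend the device coordinates, then forward the
	 * caller's format string and arguments. */
	#define toy_res_printk(level, cfg, bus, target, lun, fmt, ...) \
		printf(level "ipr: %d:%d:%d:%d: " fmt, (cfg)->host_no, \
			bus, target, lun, ##__VA_ARGS__)

	/* Convenience wrapper that fixes the severity and unpacks the resource. */
	#define toy_res_err(cfg, res, fmt, ...) \
		toy_res_printk("<err> ", cfg, (res)->bus, (res)->target, \
			(res)->lun, fmt, ##__VA_ARGS__)

	int main(void)
	{
		struct toy_cfg cfg = { .host_no = 2 };
		struct toy_res res = { .bus = 0, .target = 3, .lun = 1 };

		toy_res_err(&cfg, &res, "device timed out after %d ms\n", 30000);
		toy_res_err(&cfg, &res, "reset requested\n"); /* empty __VA_ARGS__ */
		return 0;
	}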
ioa_cfg          1796 drivers/scsi/ipr.h #define ipr_phys_res_err(ioa_cfg, res, fmt, ...)			\
ioa_cfg          1802 drivers/scsi/ipr.h 			##__VA_ARGS__, (ioa_cfg)->host->host_no,	\
ioa_cfg          1810 drivers/scsi/ipr.h 		if ((hostrcb)->ioa_cfg->sis64) {			\
ioa_cfg          1812 drivers/scsi/ipr.h 				ipr_format_res_path(hostrcb->ioa_cfg,	\
ioa_cfg          1818 drivers/scsi/ipr.h 			ipr_ra_err((hostrcb)->ioa_cfg,			\
ioa_cfg          1823 drivers/scsi/ipr.h 		dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, __VA_ARGS__); \
ioa_cfg          1944 drivers/scsi/ipr.h 	if (hostrcb->ioa_cfg->sis64) {