Lines Matching refs:h
208 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
209 static struct CommandList *cmd_alloc(struct ctlr_info *h);
210 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
213 static void hpsa_free_cmd_pool(struct ctlr_info *h);
216 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
227 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
228 static int check_for_unit_attention(struct ctlr_info *h,
230 static void check_ioctl_unit_attention(struct ctlr_info *h,
235 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
236 static inline u32 next_command(struct ctlr_info *h, u8 q);
246 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
249 static void hpsa_drain_accel_commands(struct ctlr_info *h);
250 static void hpsa_flush_cache(struct ctlr_info *h);
251 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
268 static int check_for_unit_attention(struct ctlr_info *h, in check_for_unit_attention() argument
276 dev_warn(&h->pdev->dev, HPSA "%d: a state change " in check_for_unit_attention()
277 "detected, command retried\n", h->ctlr); in check_for_unit_attention()
280 dev_warn(&h->pdev->dev, in check_for_unit_attention()
281 HPSA "%d: LUN failure detected\n", h->ctlr); in check_for_unit_attention()
284 dev_warn(&h->pdev->dev, in check_for_unit_attention()
285 HPSA "%d: report LUN data changed\n", h->ctlr); in check_for_unit_attention()
292 dev_warn(&h->pdev->dev, HPSA "%d: a power on " in check_for_unit_attention()
293 "or device reset detected\n", h->ctlr); in check_for_unit_attention()
296 dev_warn(&h->pdev->dev, HPSA "%d: unit attention " in check_for_unit_attention()
297 "cleared by another initiator\n", h->ctlr); in check_for_unit_attention()
300 dev_warn(&h->pdev->dev, HPSA "%d: unknown " in check_for_unit_attention()
301 "unit attention detected\n", h->ctlr); in check_for_unit_attention()
307 static int check_for_busy(struct ctlr_info *h, struct CommandList *c) in check_for_busy() argument
313 dev_warn(&h->pdev->dev, HPSA "device busy"); in check_for_busy()
322 struct ctlr_info *h; in host_store_hp_ssd_smart_path_status() local
333 h = shost_to_hba(shost); in host_store_hp_ssd_smart_path_status()
334 h->acciopath_status = !!status; in host_store_hp_ssd_smart_path_status()
335 dev_warn(&h->pdev->dev, in host_store_hp_ssd_smart_path_status()
337 h->acciopath_status ? "enabled" : "disabled"); in host_store_hp_ssd_smart_path_status()
346 struct ctlr_info *h; in host_store_raid_offload_debug() local
359 h = shost_to_hba(shost); in host_store_raid_offload_debug()
360 h->raid_offload_debug = debug_level; in host_store_raid_offload_debug()
361 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n", in host_store_raid_offload_debug()
362 h->raid_offload_debug); in host_store_raid_offload_debug()
370 struct ctlr_info *h; in host_store_rescan() local
372 h = shost_to_hba(shost); in host_store_rescan()
373 hpsa_scan_start(h->scsi_host); in host_store_rescan()
380 struct ctlr_info *h; in host_show_firmware_revision() local
384 h = shost_to_hba(shost); in host_show_firmware_revision()
385 if (!h->hba_inquiry_data) in host_show_firmware_revision()
387 fwrev = &h->hba_inquiry_data[32]; in host_show_firmware_revision()
396 struct ctlr_info *h = shost_to_hba(shost); in host_show_commands_outstanding() local
399 atomic_read(&h->commands_outstanding)); in host_show_commands_outstanding()
405 struct ctlr_info *h; in host_show_transport_mode() local
408 h = shost_to_hba(shost); in host_show_transport_mode()
410 h->transMethod & CFGTBL_Trans_Performant ? in host_show_transport_mode()
417 struct ctlr_info *h; in host_show_hp_ssd_smart_path_status() local
420 h = shost_to_hba(shost); in host_show_hp_ssd_smart_path_status()
422 (h->acciopath_status == 1) ? "enabled" : "disabled"); in host_show_hp_ssd_smart_path_status()
499 struct ctlr_info *h; in host_show_resettable() local
502 h = shost_to_hba(shost); in host_show_resettable()
503 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); in host_show_resettable()
528 struct ctlr_info *h; in raid_level_show() local
534 h = sdev_to_hba(sdev); in raid_level_show()
535 spin_lock_irqsave(&h->lock, flags); in raid_level_show()
538 spin_unlock_irqrestore(&h->lock, flags); in raid_level_show()
544 spin_unlock_irqrestore(&h->lock, flags); in raid_level_show()
550 spin_unlock_irqrestore(&h->lock, flags); in raid_level_show()
560 struct ctlr_info *h; in lunid_show() local
567 h = sdev_to_hba(sdev); in lunid_show()
568 spin_lock_irqsave(&h->lock, flags); in lunid_show()
571 spin_unlock_irqrestore(&h->lock, flags); in lunid_show()
575 spin_unlock_irqrestore(&h->lock, flags); in lunid_show()
584 struct ctlr_info *h; in unique_id_show() local
591 h = sdev_to_hba(sdev); in unique_id_show()
592 spin_lock_irqsave(&h->lock, flags); in unique_id_show()
595 spin_unlock_irqrestore(&h->lock, flags); in unique_id_show()
599 spin_unlock_irqrestore(&h->lock, flags); in unique_id_show()
612 struct ctlr_info *h; in host_show_hp_ssd_smart_path_enabled() local
619 h = sdev_to_hba(sdev); in host_show_hp_ssd_smart_path_enabled()
620 spin_lock_irqsave(&h->lock, flags); in host_show_hp_ssd_smart_path_enabled()
623 spin_unlock_irqrestore(&h->lock, flags); in host_show_hp_ssd_smart_path_enabled()
627 spin_unlock_irqrestore(&h->lock, flags); in host_show_hp_ssd_smart_path_enabled()
694 static inline u32 next_command(struct ctlr_info *h, u8 q) in next_command() argument
697 struct reply_queue_buffer *rq = &h->reply_queue[q]; in next_command()
699 if (h->transMethod & CFGTBL_Trans_io_accel1) in next_command()
700 return h->access.command_completed(h, q); in next_command()
702 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) in next_command()
703 return h->access.command_completed(h, q); in next_command()
708 atomic_dec(&h->commands_outstanding); in next_command()
713 if (rq->current_entry == h->max_commands) { in next_command()
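The next_command() lines above drain a per-queue reply ring: each completion decrements commands_outstanding, and current_entry wraps at max_commands. Below is a minimal userspace sketch of that consumer; the phase-bit test is not among the matched lines and is reconstructed for illustration, and every name here (reply_ring, ring_pop, RING_SIZE, RING_EMPTY) is invented for the sketch, not the driver's.

/*
 * Userspace sketch of the reply-ring consumer pattern in next_command().
 * The phase ("wraparound") comparison is reconstructed for illustration;
 * names and sizes are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE  8
#define RING_EMPTY UINT32_MAX

struct reply_ring {
        uint32_t head[RING_SIZE];   /* posted tags; bit 0 is the phase bit */
        unsigned int current_entry; /* next slot the consumer reads */
        uint32_t wraparound;        /* phase the consumer expects (0 or 1) */
};

static uint32_t ring_pop(struct reply_ring *rq)
{
        uint32_t a;

        if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
                a = rq->head[rq->current_entry];
                rq->current_entry++;            /* fresh entry: consume it */
        } else {
                a = RING_EMPTY;                 /* stale entry from the prior pass */
        }
        if (rq->current_entry == RING_SIZE) {   /* wrap, flip expected phase */
                rq->current_entry = 0;
                rq->wraparound ^= 1;
        }
        return a;
}

int main(void)
{
        struct reply_ring rq = { .wraparound = 1 };
        unsigned int i;

        /* "Controller" posts tags 10..17 with phase bit 1 (first pass). */
        for (i = 0; i < RING_SIZE; i++)
                rq.head[i] = ((10 + i) << 1) | 1;
        for (i = 0; i < RING_SIZE; i++)
                printf("completed tag %u\n", (unsigned)(ring_pop(&rq) >> 1));
        /* Rereading slot 0 now sees the old phase bit and reports empty. */
        printf("ring empty: %s\n", ring_pop(&rq) == RING_EMPTY ? "yes" : "no");
        return 0;
}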
750 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) in set_performant_mode() argument
752 if (likely(h->transMethod & CFGTBL_Trans_Performant)) { in set_performant_mode()
753 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); in set_performant_mode()
754 if (likely(h->msix_vector > 0)) in set_performant_mode()
756 raw_smp_processor_id() % h->nreply_queues; in set_performant_mode()
760 static void set_ioaccel1_performant_mode(struct ctlr_info *h, in set_ioaccel1_performant_mode() argument
763 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; in set_ioaccel1_performant_mode()
768 cp->ReplyQueue = smp_processor_id() % h->nreply_queues; in set_ioaccel1_performant_mode()
774 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) | in set_ioaccel1_performant_mode()
778 static void set_ioaccel2_performant_mode(struct ctlr_info *h, in set_ioaccel2_performant_mode() argument
781 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; in set_ioaccel2_performant_mode()
786 cp->reply_queue = smp_processor_id() % h->nreply_queues; in set_ioaccel2_performant_mode()
792 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); in set_ioaccel2_performant_mode()
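set_performant_mode() and the two ioaccel variants above pack routing hints into the low bits of the command's bus address: bit 0 flags performant mode and the next bits carry a block-fetch-table index derived from the SG count (the reply queue is chosen separately, by CPU number modulo nreply_queues). A small sketch of that bit packing; table contents and the address are made up.

/*
 * Sketch of the busaddr tag packing seen in set_performant_mode(): the
 * command's DMA address is aligned, so the free low bits carry a
 * "performant mode" flag (bit 0) and a block-fetch-table index telling
 * the controller how much of the command to fetch for this SG count.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical block fetch table indexed by SG entry count. */
        const uint32_t block_fetch_table[] = { 1, 1, 2, 2, 3, 3, 4, 4 };
        uint32_t busaddr = 0x1000;      /* command's aligned DMA address */
        unsigned int sg_count = 5;

        busaddr |= 1 | (block_fetch_table[sg_count] << 1);
        printf("tag = 0x%x (performant=%u, fetch blocks=%u)\n",
               busaddr, busaddr & 1, (busaddr >> 1) & 0x7);
        return 0;
}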
807 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h, in dial_down_lockup_detection_during_fw_flash() argument
812 atomic_inc(&h->firmware_flash_in_progress); in dial_down_lockup_detection_during_fw_flash()
813 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH; in dial_down_lockup_detection_during_fw_flash()
816 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h, in dial_up_lockup_detection_on_fw_flash_complete() argument
820 atomic_dec_and_test(&h->firmware_flash_in_progress)) in dial_up_lockup_detection_on_fw_flash_complete()
821 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; in dial_up_lockup_detection_on_fw_flash_complete()
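dial_down/dial_up_lockup_detection above stretch the heartbeat sampling interval while any firmware flash is in flight, using an atomic counter so the normal interval is restored only when the last flash finishes (atomic_dec_and_test). A userspace sketch of the same pattern, assuming illustrative interval values and C11 atomics in place of the kernel's; the real code also inspects the command's CDB before touching the counter.

/*
 * Sketch of the firmware-flash lockup-detection throttle: the first
 * in-flight flash stretches the heartbeat interval; the interval is
 * restored only when the count drops back to zero.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NORMAL_INTERVAL 30
#define FLASH_INTERVAL  (4 * NORMAL_INTERVAL)

static atomic_int flash_in_progress;
static int heartbeat_interval = NORMAL_INTERVAL;

static void flash_begin(void)
{
        atomic_fetch_add(&flash_in_progress, 1);
        heartbeat_interval = FLASH_INTERVAL;
}

static void flash_end(void)
{
        /* Restore only on the last decrement, like atomic_dec_and_test(). */
        if (atomic_fetch_sub(&flash_in_progress, 1) == 1)
                heartbeat_interval = NORMAL_INTERVAL;
}

int main(void)
{
        flash_begin();
        flash_begin();
        flash_end();
        printf("interval with one flash pending: %d\n", heartbeat_interval);
        flash_end();
        printf("interval after all flashes done: %d\n", heartbeat_interval);
        return 0;
}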
824 static void enqueue_cmd_and_start_io(struct ctlr_info *h, in enqueue_cmd_and_start_io() argument
827 dial_down_lockup_detection_during_fw_flash(h, c); in enqueue_cmd_and_start_io()
828 atomic_inc(&h->commands_outstanding); in enqueue_cmd_and_start_io()
831 set_ioaccel1_performant_mode(h, c); in enqueue_cmd_and_start_io()
832 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); in enqueue_cmd_and_start_io()
835 set_ioaccel2_performant_mode(h, c); in enqueue_cmd_and_start_io()
836 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); in enqueue_cmd_and_start_io()
839 set_performant_mode(h, c); in enqueue_cmd_and_start_io()
840 h->access.submit_command(h, c); in enqueue_cmd_and_start_io()
849 static inline int is_scsi_rev_5(struct ctlr_info *h) in is_scsi_rev_5() argument
851 if (!h->hba_inquiry_data) in is_scsi_rev_5()
853 if ((h->hba_inquiry_data[2] & 0x07) == 5) in is_scsi_rev_5()
858 static int hpsa_find_target_lun(struct ctlr_info *h, in hpsa_find_target_lun() argument
869 for (i = 0; i < h->ndevices; i++) { in hpsa_find_target_lun()
870 if (h->dev[i]->bus == bus && h->dev[i]->target != -1) in hpsa_find_target_lun()
871 __set_bit(h->dev[i]->target, lun_taken); in hpsa_find_target_lun()
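hpsa_find_target_lun() above picks a new target number by marking every target already present on the bus in a bitmap and taking the first clear bit. A plain-C sketch of that search, with an ordinary bool array standing in for __set_bit()/find_first_zero_bit() and made-up sizes.

/* Sketch of the free-target search in hpsa_find_target_lun(). */
#include <stdbool.h>
#include <stdio.h>

#define MAX_TARGETS 16

int main(void)
{
        bool lun_taken[MAX_TARGETS] = { false };
        int existing_targets[] = { 0, 1, 3 };   /* targets already on the bus */
        unsigned int i;
        int target = -1;

        for (i = 0; i < sizeof(existing_targets) / sizeof(existing_targets[0]); i++)
                lun_taken[existing_targets[i]] = true;

        for (i = 0; i < MAX_TARGETS; i++) {
                if (!lun_taken[i]) {
                        target = i;             /* first free target: 2 */
                        break;
                }
        }
        printf("assigned target %d\n", target);
        return 0;
}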
885 static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno, in hpsa_scsi_add_entry() argument
890 int n = h->ndevices; in hpsa_scsi_add_entry()
896 dev_err(&h->pdev->dev, "too many devices, some will be " in hpsa_scsi_add_entry()
912 if (hpsa_find_target_lun(h, device->scsi3addr, in hpsa_scsi_add_entry()
927 sd = h->dev[i]; in hpsa_scsi_add_entry()
939 dev_warn(&h->pdev->dev, "physical device with no LUN=0," in hpsa_scsi_add_entry()
947 h->dev[n] = device; in hpsa_scsi_add_entry()
948 h->ndevices++; in hpsa_scsi_add_entry()
957 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n", in hpsa_scsi_add_entry()
964 static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno, in hpsa_scsi_update_entry() argument
971 h->dev[entry]->raid_level = new_entry->raid_level; in hpsa_scsi_update_entry()
983 h->dev[entry]->raid_map = new_entry->raid_map; in hpsa_scsi_update_entry()
984 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; in hpsa_scsi_update_entry()
987 h->dev[entry]->offload_config = new_entry->offload_config; in hpsa_scsi_update_entry()
988 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; in hpsa_scsi_update_entry()
989 h->dev[entry]->offload_enabled = new_entry->offload_enabled; in hpsa_scsi_update_entry()
990 h->dev[entry]->queue_depth = new_entry->queue_depth; in hpsa_scsi_update_entry()
992 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n", in hpsa_scsi_update_entry()
998 static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, in hpsa_scsi_replace_entry() argument
1005 removed[*nremoved] = h->dev[entry]; in hpsa_scsi_replace_entry()
1013 new_entry->target = h->dev[entry]->target; in hpsa_scsi_replace_entry()
1014 new_entry->lun = h->dev[entry]->lun; in hpsa_scsi_replace_entry()
1017 h->dev[entry] = new_entry; in hpsa_scsi_replace_entry()
1020 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n", in hpsa_scsi_replace_entry()
1026 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, in hpsa_scsi_remove_entry() argument
1035 sd = h->dev[entry]; in hpsa_scsi_remove_entry()
1036 removed[*nremoved] = h->dev[entry]; in hpsa_scsi_remove_entry()
1039 for (i = entry; i < h->ndevices-1; i++) in hpsa_scsi_remove_entry()
1040 h->dev[i] = h->dev[i+1]; in hpsa_scsi_remove_entry()
1041 h->ndevices--; in hpsa_scsi_remove_entry()
1042 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n", in hpsa_scsi_remove_entry()
1057 static void fixup_botched_add(struct ctlr_info *h, in fixup_botched_add() argument
1066 spin_lock_irqsave(&h->lock, flags); in fixup_botched_add()
1067 for (i = 0; i < h->ndevices; i++) { in fixup_botched_add()
1068 if (h->dev[i] == added) { in fixup_botched_add()
1069 for (j = i; j < h->ndevices-1; j++) in fixup_botched_add()
1070 h->dev[j] = h->dev[j+1]; in fixup_botched_add()
1071 h->ndevices--; in fixup_botched_add()
1075 spin_unlock_irqrestore(&h->lock, flags); in fixup_botched_add()
1159 static void hpsa_monitor_offline_device(struct ctlr_info *h, in hpsa_monitor_offline_device() argument
1166 spin_lock_irqsave(&h->offline_device_lock, flags); in hpsa_monitor_offline_device()
1167 list_for_each_entry(device, &h->offline_device_list, offline_list) { in hpsa_monitor_offline_device()
1170 spin_unlock_irqrestore(&h->offline_device_lock, flags); in hpsa_monitor_offline_device()
1174 spin_unlock_irqrestore(&h->offline_device_lock, flags); in hpsa_monitor_offline_device()
1179 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__); in hpsa_monitor_offline_device()
1183 spin_lock_irqsave(&h->offline_device_lock, flags); in hpsa_monitor_offline_device()
1184 list_add_tail(&device->offline_list, &h->offline_device_list); in hpsa_monitor_offline_device()
1185 spin_unlock_irqrestore(&h->offline_device_lock, flags); in hpsa_monitor_offline_device()
1189 static void hpsa_show_volume_status(struct ctlr_info *h, in hpsa_show_volume_status() argument
1193 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1195 h->scsi_host->host_no, in hpsa_show_volume_status()
1201 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1203 h->scsi_host->host_no, in hpsa_show_volume_status()
1207 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1209 h->scsi_host->host_no, in hpsa_show_volume_status()
1213 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1215 h->scsi_host->host_no, in hpsa_show_volume_status()
1219 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1221 h->scsi_host->host_no, in hpsa_show_volume_status()
1225 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1227 h->scsi_host->host_no, in hpsa_show_volume_status()
1231 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1233 h->scsi_host->host_no, in hpsa_show_volume_status()
1237 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1239 h->scsi_host->host_no, in hpsa_show_volume_status()
1243 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1245 h->scsi_host->host_no, in hpsa_show_volume_status()
1249 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1251 h->scsi_host->host_no, in hpsa_show_volume_status()
1255 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1257 h->scsi_host->host_no, in hpsa_show_volume_status()
1267 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, in hpsa_figure_phys_disk_ptrs() argument
1301 qdepth = min(h->nr_cmds, qdepth + in hpsa_figure_phys_disk_ptrs()
1315 logical_drive->queue_depth = h->nr_cmds; in hpsa_figure_phys_disk_ptrs()
1325 logical_drive->queue_depth = h->nr_cmds; in hpsa_figure_phys_disk_ptrs()
1328 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, in hpsa_update_log_drive_phys_drive_ptrs() argument
1338 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); in hpsa_update_log_drive_phys_drive_ptrs()
1342 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, in adjust_hpsa_scsi_table() argument
1360 dev_warn(&h->pdev->dev, "out of memory in " in adjust_hpsa_scsi_table()
1365 spin_lock_irqsave(&h->devlock, flags); in adjust_hpsa_scsi_table()
1377 while (i < h->ndevices) { in adjust_hpsa_scsi_table()
1378 csd = h->dev[i]; in adjust_hpsa_scsi_table()
1382 hpsa_scsi_remove_entry(h, hostno, i, in adjust_hpsa_scsi_table()
1387 hpsa_scsi_replace_entry(h, hostno, i, sd[entry], in adjust_hpsa_scsi_table()
1394 hpsa_scsi_update_entry(h, hostno, i, sd[entry]); in adjust_hpsa_scsi_table()
1413 hpsa_show_volume_status(h, sd[i]); in adjust_hpsa_scsi_table()
1414 dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n", in adjust_hpsa_scsi_table()
1415 h->scsi_host->host_no, in adjust_hpsa_scsi_table()
1420 device_change = hpsa_scsi_find_entry(sd[i], h->dev, in adjust_hpsa_scsi_table()
1421 h->ndevices, &entry); in adjust_hpsa_scsi_table()
1424 if (hpsa_scsi_add_entry(h, hostno, sd[i], in adjust_hpsa_scsi_table()
1431 dev_warn(&h->pdev->dev, in adjust_hpsa_scsi_table()
1436 spin_unlock_irqrestore(&h->devlock, flags); in adjust_hpsa_scsi_table()
1446 hpsa_monitor_offline_device(h, sd[i]->scsi3addr); in adjust_hpsa_scsi_table()
1456 sh = h->scsi_host; in adjust_hpsa_scsi_table()
1470 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d " in adjust_hpsa_scsi_table()
1483 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, " in adjust_hpsa_scsi_table()
1489 fixup_botched_add(h, added[i]); in adjust_hpsa_scsi_table()
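adjust_hpsa_scsi_table() above reconciles the driver's device table against a fresh scan: entries the scan no longer reports are removed (hpsa_scsi_remove_entry compacts by shifting), changed entries are replaced or updated in place, and newly discovered devices are added. A minimal two-pass sketch of that reconciliation, matching devices by a bare integer id where the driver compares SCSI3 addresses and device attributes.

/* Sketch of the device-table reconciliation in adjust_hpsa_scsi_table(). */
#include <stdbool.h>
#include <stdio.h>

#define MAXN 8

static bool contains(const int *v, int n, int x)
{
        for (int i = 0; i < n; i++)
                if (v[i] == x)
                        return true;
        return false;
}

int main(void)
{
        int current[MAXN] = { 1, 2, 3 };        /* devices the driver knows */
        int ncurrent = 3;
        int scanned[] = { 2, 3, 4 };            /* devices just discovered */
        int nscanned = 3;

        /* Pass 1: remove table entries the scan no longer reports,
         * compacting by shifting as hpsa_scsi_remove_entry() does. */
        for (int i = 0; i < ncurrent; ) {
                if (!contains(scanned, nscanned, current[i])) {
                        printf("device %d removed\n", current[i]);
                        for (int j = i; j < ncurrent - 1; j++)
                                current[j] = current[j + 1];
                        ncurrent--;
                } else {
                        i++;
                }
        }
        /* Pass 2: add scanned devices missing from the table. */
        for (int i = 0; i < nscanned; i++) {
                if (!contains(current, ncurrent, scanned[i])) {
                        current[ncurrent++] = scanned[i];
                        printf("device %d added\n", scanned[i]);
                }
        }
        printf("table now holds %d device(s)\n", ncurrent);
        return 0;
}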
1501 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, in lookup_hpsa_scsi_dev() argument
1507 for (i = 0; i < h->ndevices; i++) { in lookup_hpsa_scsi_dev()
1508 sd = h->dev[i]; in lookup_hpsa_scsi_dev()
1520 struct ctlr_info *h; in hpsa_slave_alloc() local
1522 h = sdev_to_hba(sdev); in hpsa_slave_alloc()
1523 spin_lock_irqsave(&h->devlock, flags); in hpsa_slave_alloc()
1524 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), in hpsa_slave_alloc()
1532 spin_unlock_irqrestore(&h->devlock, flags); in hpsa_slave_alloc()
1541 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) in hpsa_free_sg_chain_blocks() argument
1545 if (!h->cmd_sg_list) in hpsa_free_sg_chain_blocks()
1547 for (i = 0; i < h->nr_cmds; i++) { in hpsa_free_sg_chain_blocks()
1548 kfree(h->cmd_sg_list[i]); in hpsa_free_sg_chain_blocks()
1549 h->cmd_sg_list[i] = NULL; in hpsa_free_sg_chain_blocks()
1551 kfree(h->cmd_sg_list); in hpsa_free_sg_chain_blocks()
1552 h->cmd_sg_list = NULL; in hpsa_free_sg_chain_blocks()
1555 static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h) in hpsa_allocate_sg_chain_blocks() argument
1559 if (h->chainsize <= 0) in hpsa_allocate_sg_chain_blocks()
1562 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, in hpsa_allocate_sg_chain_blocks()
1564 if (!h->cmd_sg_list) { in hpsa_allocate_sg_chain_blocks()
1565 dev_err(&h->pdev->dev, "Failed to allocate SG list\n"); in hpsa_allocate_sg_chain_blocks()
1568 for (i = 0; i < h->nr_cmds; i++) { in hpsa_allocate_sg_chain_blocks()
1569 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * in hpsa_allocate_sg_chain_blocks()
1570 h->chainsize, GFP_KERNEL); in hpsa_allocate_sg_chain_blocks()
1571 if (!h->cmd_sg_list[i]) { in hpsa_allocate_sg_chain_blocks()
1572 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n"); in hpsa_allocate_sg_chain_blocks()
1579 hpsa_free_sg_chain_blocks(h); in hpsa_allocate_sg_chain_blocks()
1583 static int hpsa_map_sg_chain_block(struct ctlr_info *h, in hpsa_map_sg_chain_block() argument
1590 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; in hpsa_map_sg_chain_block()
1591 chain_block = h->cmd_sg_list[c->cmdindex]; in hpsa_map_sg_chain_block()
1594 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); in hpsa_map_sg_chain_block()
1596 temp64 = pci_map_single(h->pdev, chain_block, chain_len, in hpsa_map_sg_chain_block()
1598 if (dma_mapping_error(&h->pdev->dev, temp64)) { in hpsa_map_sg_chain_block()
1607 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, in hpsa_unmap_sg_chain_block() argument
1612 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries) in hpsa_unmap_sg_chain_block()
1615 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; in hpsa_unmap_sg_chain_block()
1616 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr), in hpsa_unmap_sg_chain_block()
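hpsa_map_sg_chain_block()/hpsa_unmap_sg_chain_block() above implement SG chaining: a command embeds only max_cmd_sg_entries descriptors, and when a request needs more, the last embedded slot is rewritten to point at a separately allocated chain block holding the overflow. A sketch of that layout with DMA mapping omitted; the sg_entry struct and all sizes are invented for illustration.

/* Sketch of the chained scatter-gather layout used by the driver. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_EMBEDDED 4

struct sg_entry {
        uint64_t addr;
        uint32_t len;
        uint32_t chain;         /* nonzero: addr points at a chain block */
};

int main(void)
{
        struct sg_entry embedded[MAX_EMBEDDED];
        unsigned int use_sg = 6;        /* request needs 6 descriptors */
        unsigned int i;

        if (use_sg > MAX_EMBEDDED) {
                unsigned int overflow = use_sg - (MAX_EMBEDDED - 1);
                struct sg_entry *chain_block =
                        calloc(overflow, sizeof(*chain_block));

                if (!chain_block)
                        return 1;
                for (i = 0; i < MAX_EMBEDDED - 1; i++)
                        embedded[i] = (struct sg_entry){ .addr = 0x1000 * i,
                                                         .len = 512 };
                for (i = 0; i < overflow; i++)
                        chain_block[i] = (struct sg_entry){
                                .addr = 0x1000 * (MAX_EMBEDDED - 1 + i),
                                .len = 512 };
                /* Last embedded slot becomes the pointer to the overflow. */
                embedded[MAX_EMBEDDED - 1] = (struct sg_entry){
                        .addr = (uintptr_t)chain_block,
                        .len = overflow * sizeof(*chain_block),
                        .chain = 1 };
                printf("%u embedded entries + chained block of %u entries\n",
                       MAX_EMBEDDED - 1, overflow);
                free(chain_block);
        }
        return 0;
}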
1625 static int handle_ioaccel_mode2_error(struct ctlr_info *h, in handle_ioaccel_mode2_error() argument
1639 dev_warn(&h->pdev->dev, in handle_ioaccel_mode2_error()
1661 dev_warn(&h->pdev->dev, in handle_ioaccel_mode2_error()
1667 dev_warn(&h->pdev->dev, in handle_ioaccel_mode2_error()
1677 dev_warn(&h->pdev->dev, in handle_ioaccel_mode2_error()
1683 dev_warn(&h->pdev->dev, in handle_ioaccel_mode2_error()
1692 dev_warn(&h->pdev->dev, in handle_ioaccel_mode2_error()
1702 dev_warn(&h->pdev->dev, "task management function rejected.\n"); in handle_ioaccel_mode2_error()
1706 dev_warn(&h->pdev->dev, "task management function invalid LUN\n"); in handle_ioaccel_mode2_error()
1709 dev_warn(&h->pdev->dev, in handle_ioaccel_mode2_error()
1720 static void process_ioaccel2_completion(struct ctlr_info *h, in process_ioaccel2_completion() argument
1724 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; in process_ioaccel2_completion()
1729 cmd_free(h, c); in process_ioaccel2_completion()
1747 if (handle_ioaccel_mode2_error(h, c, cmd, c2)) in process_ioaccel2_completion()
1750 cmd_free(h, c); in process_ioaccel2_completion()
1756 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); in process_ioaccel2_completion()
1762 struct ctlr_info *h; in complete_scsi_command() local
1773 h = cp->h; in complete_scsi_command()
1778 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) in complete_scsi_command()
1779 hpsa_unmap_sg_chain_block(h, cp); in complete_scsi_command()
1788 return process_ioaccel2_completion(h, cp, cmd, dev); in complete_scsi_command()
1796 cmd_free(h, cp); in complete_scsi_command()
1815 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; in complete_scsi_command()
1833 h->resubmit_wq, &cp->work); in complete_scsi_command()
1861 dev_warn(&h->pdev->dev, "cp %p has status 0x%x " in complete_scsi_command()
1868 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " in complete_scsi_command()
1890 dev_warn(&h->pdev->dev, in complete_scsi_command()
1907 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n", in complete_scsi_command()
1912 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n", in complete_scsi_command()
1917 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", in complete_scsi_command()
1922 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", in complete_scsi_command()
1927 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", in complete_scsi_command()
1932 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", in complete_scsi_command()
1937 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n", in complete_scsi_command()
1942 dev_warn(&h->pdev->dev, "Command unabortable\n"); in complete_scsi_command()
1949 dev_warn(&h->pdev->dev, in complete_scsi_command()
1954 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", in complete_scsi_command()
1957 cmd_free(h, cp); in complete_scsi_command()
2001 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, in hpsa_scsi_do_simple_cmd_core() argument
2007 enqueue_cmd_and_start_io(h, c); in hpsa_scsi_do_simple_cmd_core()
2011 static u32 lockup_detected(struct ctlr_info *h) in lockup_detected() argument
2017 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); in lockup_detected()
2023 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, in hpsa_scsi_do_simple_cmd_core_if_no_lockup() argument
2027 if (unlikely(lockup_detected(h))) in hpsa_scsi_do_simple_cmd_core_if_no_lockup()
2030 hpsa_scsi_do_simple_cmd_core(h, c); in hpsa_scsi_do_simple_cmd_core_if_no_lockup()
2034 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, in hpsa_scsi_do_simple_cmd_with_retry() argument
2041 hpsa_scsi_do_simple_cmd_core(h, c); in hpsa_scsi_do_simple_cmd_with_retry()
2048 } while ((check_for_unit_attention(h, c) || in hpsa_scsi_do_simple_cmd_with_retry()
2049 check_for_busy(h, c)) && in hpsa_scsi_do_simple_cmd_with_retry()
2051 hpsa_pci_unmap(h->pdev, c, 1, data_direction); in hpsa_scsi_do_simple_cmd_with_retry()
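hpsa_scsi_do_simple_cmd_with_retry() above reissues a command while it keeps completing with a retryable condition (unit attention or busy), bounded by a fixed retry count. A sketch of that loop; issue_cmd() and is_retryable() are hypothetical stand-ins for firing the command and decoding its error info, and the backoff is illustrative.

/* Sketch of the bounded retry loop around a simple command. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_RETRIES 3

static int attempts;

static int issue_cmd(void)      /* pretend the first two tries come back busy */
{
        return ++attempts < 3 ? -1 : 0;
}

static bool is_retryable(int status)
{
        return status != 0;     /* busy / unit attention in the real driver */
}

int main(void)
{
        int retry = 0, status;

        do {
                status = issue_cmd();
                if (!is_retryable(status))
                        break;
                usleep(1000);   /* brief backoff; illustrative pacing */
        } while (++retry <= MAX_RETRIES);

        printf("status %d after %d attempt(s)\n", status, attempts);
        return 0;
}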
2054 static void hpsa_print_cmd(struct ctlr_info *h, char *txt, in hpsa_print_cmd() argument
2060 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" in hpsa_print_cmd()
2070 static void hpsa_scsi_interpret_error(struct ctlr_info *h, in hpsa_scsi_interpret_error() argument
2074 struct device *d = &cp->h->pdev->dev; in hpsa_scsi_interpret_error()
2079 hpsa_print_cmd(h, "SCSI status", cp); in hpsa_scsi_interpret_error()
2094 hpsa_print_cmd(h, "overrun condition", cp); in hpsa_scsi_interpret_error()
2100 hpsa_print_cmd(h, "invalid command", cp); in hpsa_scsi_interpret_error()
2105 hpsa_print_cmd(h, "protocol error", cp); in hpsa_scsi_interpret_error()
2108 hpsa_print_cmd(h, "hardware error", cp); in hpsa_scsi_interpret_error()
2111 hpsa_print_cmd(h, "connection lost", cp); in hpsa_scsi_interpret_error()
2114 hpsa_print_cmd(h, "aborted", cp); in hpsa_scsi_interpret_error()
2117 hpsa_print_cmd(h, "abort failed", cp); in hpsa_scsi_interpret_error()
2120 hpsa_print_cmd(h, "unsolicited abort", cp); in hpsa_scsi_interpret_error()
2123 hpsa_print_cmd(h, "timed out", cp); in hpsa_scsi_interpret_error()
2126 hpsa_print_cmd(h, "unabortable", cp); in hpsa_scsi_interpret_error()
2129 hpsa_print_cmd(h, "unknown status", cp); in hpsa_scsi_interpret_error()
2135 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, in hpsa_scsi_do_inquiry() argument
2143 c = cmd_alloc(h); in hpsa_scsi_do_inquiry()
2146 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); in hpsa_scsi_do_inquiry()
2150 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, in hpsa_scsi_do_inquiry()
2155 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); in hpsa_scsi_do_inquiry()
2158 hpsa_scsi_interpret_error(h, c); in hpsa_scsi_do_inquiry()
2162 cmd_free(h, c); in hpsa_scsi_do_inquiry()
2166 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, in hpsa_bmic_ctrl_mode_sense() argument
2174 c = cmd_alloc(h); in hpsa_bmic_ctrl_mode_sense()
2176 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); in hpsa_bmic_ctrl_mode_sense()
2180 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize, in hpsa_bmic_ctrl_mode_sense()
2185 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); in hpsa_bmic_ctrl_mode_sense()
2188 hpsa_scsi_interpret_error(h, c); in hpsa_bmic_ctrl_mode_sense()
2192 cmd_free(h, c); in hpsa_bmic_ctrl_mode_sense()
2196 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, in hpsa_send_reset() argument
2203 c = cmd_alloc(h); in hpsa_send_reset()
2206 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); in hpsa_send_reset()
2211 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, in hpsa_send_reset()
2214 hpsa_scsi_do_simple_cmd_core(h, c); in hpsa_send_reset()
2219 hpsa_scsi_interpret_error(h, c); in hpsa_send_reset()
2222 cmd_free(h, c); in hpsa_send_reset()
2226 static void hpsa_get_raid_level(struct ctlr_info *h, in hpsa_get_raid_level() argument
2236 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64); in hpsa_get_raid_level()
2247 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, in hpsa_debug_map_buff() argument
2258 if (h->raid_offload_debug < 2) in hpsa_debug_map_buff()
2261 dev_info(&h->pdev->dev, "structure_size = %u\n", in hpsa_debug_map_buff()
2263 dev_info(&h->pdev->dev, "volume_blk_size = %u\n", in hpsa_debug_map_buff()
2265 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", in hpsa_debug_map_buff()
2267 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", in hpsa_debug_map_buff()
2269 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", in hpsa_debug_map_buff()
2271 dev_info(&h->pdev->dev, "strip_size = %u\n", in hpsa_debug_map_buff()
2273 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", in hpsa_debug_map_buff()
2275 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", in hpsa_debug_map_buff()
2277 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", in hpsa_debug_map_buff()
2279 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", in hpsa_debug_map_buff()
2281 dev_info(&h->pdev->dev, "row_cnt = %u\n", in hpsa_debug_map_buff()
2283 dev_info(&h->pdev->dev, "layout_map_count = %u\n", in hpsa_debug_map_buff()
2285 dev_info(&h->pdev->dev, "flags = 0x%x\n", in hpsa_debug_map_buff()
2287 dev_info(&h->pdev->dev, "encryption = %s\n", in hpsa_debug_map_buff()
2290 dev_info(&h->pdev->dev, "dekindex = %u\n", in hpsa_debug_map_buff()
2294 dev_info(&h->pdev->dev, "Map%u:\n", map); in hpsa_debug_map_buff()
2297 dev_info(&h->pdev->dev, " Row%u:\n", row); in hpsa_debug_map_buff()
2301 dev_info(&h->pdev->dev, in hpsa_debug_map_buff()
2308 dev_info(&h->pdev->dev, in hpsa_debug_map_buff()
2316 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, in hpsa_debug_map_buff() argument
2323 static int hpsa_get_raid_map(struct ctlr_info *h, in hpsa_get_raid_map() argument
2330 c = cmd_alloc(h); in hpsa_get_raid_map()
2332 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); in hpsa_get_raid_map()
2335 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, in hpsa_get_raid_map()
2338 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); in hpsa_get_raid_map()
2339 cmd_free(h, c); in hpsa_get_raid_map()
2342 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); in hpsa_get_raid_map()
2345 hpsa_scsi_interpret_error(h, c); in hpsa_get_raid_map()
2346 cmd_free(h, c); in hpsa_get_raid_map()
2349 cmd_free(h, c); in hpsa_get_raid_map()
2354 dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); in hpsa_get_raid_map()
2357 hpsa_debug_map_buff(h, rc, &this_device->raid_map); in hpsa_get_raid_map()
2361 static int hpsa_bmic_id_physical_device(struct ctlr_info *h, in hpsa_bmic_id_physical_device() argument
2369 c = cmd_alloc(h); in hpsa_bmic_id_physical_device()
2370 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize, in hpsa_bmic_id_physical_device()
2378 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); in hpsa_bmic_id_physical_device()
2381 hpsa_scsi_interpret_error(h, c); in hpsa_bmic_id_physical_device()
2385 cmd_free(h, c); in hpsa_bmic_id_physical_device()
2389 static int hpsa_vpd_page_supported(struct ctlr_info *h, in hpsa_vpd_page_supported() argument
2402 rc = hpsa_scsi_do_inquiry(h, scsi3addr, in hpsa_vpd_page_supported()
2414 rc = hpsa_scsi_do_inquiry(h, scsi3addr, in hpsa_vpd_page_supported()
2432 static void hpsa_get_ioaccel_status(struct ctlr_info *h, in hpsa_get_ioaccel_status() argument
2445 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) in hpsa_get_ioaccel_status()
2447 rc = hpsa_scsi_do_inquiry(h, scsi3addr, in hpsa_get_ioaccel_status()
2461 if (hpsa_get_raid_map(h, scsi3addr, this_device)) in hpsa_get_ioaccel_status()
2470 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, in hpsa_get_device_id() argument
2481 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); in hpsa_get_device_id()
2488 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, in hpsa_scsi_do_report_luns() argument
2497 c = cmd_alloc(h); in hpsa_scsi_do_report_luns()
2499 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); in hpsa_scsi_do_report_luns()
2504 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, in hpsa_scsi_do_report_luns()
2511 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); in hpsa_scsi_do_report_luns()
2515 hpsa_scsi_interpret_error(h, c); in hpsa_scsi_do_report_luns()
2521 dev_err(&h->pdev->dev, in hpsa_scsi_do_report_luns()
2529 cmd_free(h, c); in hpsa_scsi_do_report_luns()
2533 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, in hpsa_scsi_do_report_phys_luns() argument
2536 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, in hpsa_scsi_do_report_phys_luns()
2540 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, in hpsa_scsi_do_report_log_luns() argument
2543 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); in hpsa_scsi_do_report_log_luns()
2555 static int hpsa_get_volume_status(struct ctlr_info *h, in hpsa_get_volume_status() argument
2568 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) in hpsa_get_volume_status()
2572 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, in hpsa_get_volume_status()
2579 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, in hpsa_get_volume_status()
2599 static int hpsa_volume_offline(struct ctlr_info *h, in hpsa_volume_offline() argument
2611 c = cmd_alloc(h); in hpsa_volume_offline()
2614 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); in hpsa_volume_offline()
2615 hpsa_scsi_do_simple_cmd_core(h, c); in hpsa_volume_offline()
2622 cmd_free(h, c); in hpsa_volume_offline()
2632 ldstat = hpsa_get_volume_status(h, scsi3addr); in hpsa_volume_offline()
2659 static int hpsa_update_device_info(struct ctlr_info *h, in hpsa_update_device_info() argument
2677 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, in hpsa_update_device_info()
2680 dev_err(&h->pdev->dev, in hpsa_update_device_info()
2693 hpsa_get_device_id(h, scsi3addr, this_device->device_id, in hpsa_update_device_info()
2700 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); in hpsa_update_device_info()
2701 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) in hpsa_update_device_info()
2702 hpsa_get_ioaccel_status(h, scsi3addr, this_device); in hpsa_update_device_info()
2703 volume_offline = hpsa_volume_offline(h, scsi3addr); in hpsa_update_device_info()
2712 this_device->queue_depth = h->nr_cmds; in hpsa_update_device_info()
2743 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) in is_ext_target() argument
2761 static void figure_bus_target_lun(struct ctlr_info *h, in figure_bus_target_lun() argument
2776 if (is_ext_target(h, device)) { in figure_bus_target_lun()
2799 static int add_ext_target_dev(struct ctlr_info *h, in add_ext_target_dev() argument
2812 if (!is_ext_target(h, tmpdevice)) in add_ext_target_dev()
2823 if (is_scsi_rev_5(h)) in add_ext_target_dev()
2827 dev_warn(&h->pdev->dev, "Maximum number of external " in add_ext_target_dev()
2833 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) in add_ext_target_dev()
2850 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, in hpsa_get_pdisk_of_ioaccel2() argument
2870 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; in hpsa_get_pdisk_of_ioaccel2()
2886 if (h->raid_offload_debug > 0) in hpsa_get_pdisk_of_ioaccel2()
2887 dev_info(&h->pdev->dev, in hpsa_get_pdisk_of_ioaccel2()
2901 if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) { in hpsa_get_pdisk_of_ioaccel2()
2902 dev_err(&h->pdev->dev, in hpsa_get_pdisk_of_ioaccel2()
2920 if (h->raid_offload_debug > 0) in hpsa_get_pdisk_of_ioaccel2()
2921 dev_info(&h->pdev->dev, in hpsa_get_pdisk_of_ioaccel2()
2941 static int hpsa_gather_lun_info(struct ctlr_info *h, in hpsa_gather_lun_info() argument
2945 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { in hpsa_gather_lun_info()
2946 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); in hpsa_gather_lun_info()
2951 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n", in hpsa_gather_lun_info()
2955 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) { in hpsa_gather_lun_info()
2956 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); in hpsa_gather_lun_info()
2962 dev_warn(&h->pdev->dev, in hpsa_gather_lun_info()
2969 dev_warn(&h->pdev->dev, in hpsa_gather_lun_info()
2978 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, in figure_lunaddrbytes() argument
3005 static int hpsa_hba_mode_enabled(struct ctlr_info *h) in hpsa_hba_mode_enabled() argument
3015 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params, in hpsa_hba_mode_enabled()
3029 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, in hpsa_get_ioaccel_drive_info() argument
3040 rc = hpsa_bmic_id_physical_device(h, lunaddrbytes, in hpsa_get_ioaccel_drive_info()
3055 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) in hpsa_update_scsi_devices() argument
3088 dev_err(&h->pdev->dev, "out of memory\n"); in hpsa_update_scsi_devices()
3093 rescan_hba_mode = hpsa_hba_mode_enabled(h); in hpsa_update_scsi_devices()
3097 if (!h->hba_mode_enabled && rescan_hba_mode) in hpsa_update_scsi_devices()
3098 dev_warn(&h->pdev->dev, "HBA mode enabled\n"); in hpsa_update_scsi_devices()
3099 else if (h->hba_mode_enabled && !rescan_hba_mode) in hpsa_update_scsi_devices()
3100 dev_warn(&h->pdev->dev, "HBA mode disabled\n"); in hpsa_update_scsi_devices()
3102 h->hba_mode_enabled = rescan_hba_mode; in hpsa_update_scsi_devices()
3104 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals, in hpsa_update_scsi_devices()
3117 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." in hpsa_update_scsi_devices()
3125 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", in hpsa_update_scsi_devices()
3132 if (is_scsi_rev_5(h)) in hpsa_update_scsi_devices()
3143 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, in hpsa_update_scsi_devices()
3151 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, in hpsa_update_scsi_devices()
3154 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); in hpsa_update_scsi_devices()
3164 if (add_ext_target_dev(h, tmpdevice, this_device, in hpsa_update_scsi_devices()
3186 if (h->hba_mode_enabled) { in hpsa_update_scsi_devices()
3191 } else if (h->acciopath_status) { in hpsa_update_scsi_devices()
3202 if (h->transMethod & CFGTBL_Trans_io_accel1 || in hpsa_update_scsi_devices()
3203 h->transMethod & CFGTBL_Trans_io_accel2) { in hpsa_update_scsi_devices()
3204 hpsa_get_ioaccel_drive_info(h, this_device, in hpsa_update_scsi_devices()
3230 hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent); in hpsa_update_scsi_devices()
3231 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); in hpsa_update_scsi_devices()
3258 static int hpsa_scatter_gather(struct ctlr_info *h, in hpsa_scatter_gather() argument
3266 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); in hpsa_scatter_gather()
3279 if (i == h->max_cmd_sg_entries - 1 && in hpsa_scatter_gather()
3280 use_sg > h->max_cmd_sg_entries) { in hpsa_scatter_gather()
3282 curr_sg = h->cmd_sg_list[cp->cmdindex]; in hpsa_scatter_gather()
3292 if (use_sg + chained > h->maxSG) in hpsa_scatter_gather()
3293 h->maxSG = use_sg + chained; in hpsa_scatter_gather()
3296 cp->Header.SGList = h->max_cmd_sg_entries; in hpsa_scatter_gather()
3298 if (hpsa_map_sg_chain_block(h, cp)) { in hpsa_scatter_gather()
3360 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, in hpsa_scsi_ioaccel1_queue_command() argument
3365 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; in hpsa_scsi_ioaccel1_queue_command()
3375 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { in hpsa_scsi_ioaccel1_queue_command()
3390 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + in hpsa_scsi_ioaccel1_queue_command()
3424 dev_err(&h->pdev->dev, "unknown data direction: %d\n", in hpsa_scsi_ioaccel1_queue_command()
3443 enqueue_cmd_and_start_io(h, c); in hpsa_scsi_ioaccel1_queue_command()
3451 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, in hpsa_scsi_ioaccel_direct_map() argument
3459 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, in hpsa_scsi_ioaccel_direct_map()
3466 static void set_encrypt_ioaccel2(struct ctlr_info *h, in set_encrypt_ioaccel2() argument
3505 dev_err(&h->pdev->dev, in set_encrypt_ioaccel2()
3520 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, in hpsa_scsi_ioaccel2_queue_command() argument
3525 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; in hpsa_scsi_ioaccel2_queue_command()
3533 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { in hpsa_scsi_ioaccel2_queue_command()
3545 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + in hpsa_scsi_ioaccel2_queue_command()
3588 dev_err(&h->pdev->dev, "unknown data direction: %d\n", in hpsa_scsi_ioaccel2_queue_command()
3599 set_encrypt_ioaccel2(h, c, cp); in hpsa_scsi_ioaccel2_queue_command()
3613 enqueue_cmd_and_start_io(h, c); in hpsa_scsi_ioaccel2_queue_command()
3620 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, in hpsa_scsi_ioaccel_queue_command() argument
3630 if (h->transMethod & CFGTBL_Trans_io_accel1) in hpsa_scsi_ioaccel_queue_command()
3631 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, in hpsa_scsi_ioaccel_queue_command()
3635 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, in hpsa_scsi_ioaccel_queue_command()
3669 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, in hpsa_scsi_ioaccel_raid_map() argument
3990 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, in hpsa_scsi_ioaccel_raid_map()
3996 static int hpsa_ciss_submit(struct ctlr_info *h, in hpsa_ciss_submit() argument
4046 dev_err(&h->pdev->dev, "unknown data direction: %d\n", in hpsa_ciss_submit()
4052 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ in hpsa_ciss_submit()
4053 cmd_free(h, c); in hpsa_ciss_submit()
4056 enqueue_cmd_and_start_io(h, c); in hpsa_ciss_submit()
4075 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { in hpsa_command_resubmit_worker()
4089 struct ctlr_info *h; in hpsa_scsi_queue_command() local
4096 h = sdev_to_hba(cmd->device); in hpsa_scsi_queue_command()
4105 if (unlikely(lockup_detected(h))) { in hpsa_scsi_queue_command()
4110 c = cmd_alloc(h); in hpsa_scsi_queue_command()
4112 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); in hpsa_scsi_queue_command()
4115 if (unlikely(lockup_detected(h))) { in hpsa_scsi_queue_command()
4117 cmd_free(h, c); in hpsa_scsi_queue_command()
4128 h->acciopath_status)) { in hpsa_scsi_queue_command()
4135 rc = hpsa_scsi_ioaccel_raid_map(h, c); in hpsa_scsi_queue_command()
4139 cmd_free(h, c); in hpsa_scsi_queue_command()
4143 rc = hpsa_scsi_ioaccel_direct_map(h, c); in hpsa_scsi_queue_command()
4147 cmd_free(h, c); in hpsa_scsi_queue_command()
4152 return hpsa_ciss_submit(h, c, cmd, scsi3addr); in hpsa_scsi_queue_command()
4155 static void hpsa_scan_complete(struct ctlr_info *h) in hpsa_scan_complete() argument
4159 spin_lock_irqsave(&h->scan_lock, flags); in hpsa_scan_complete()
4160 h->scan_finished = 1; in hpsa_scan_complete()
4161 wake_up_all(&h->scan_wait_queue); in hpsa_scan_complete()
4162 spin_unlock_irqrestore(&h->scan_lock, flags); in hpsa_scan_complete()
4167 struct ctlr_info *h = shost_to_hba(sh); in hpsa_scan_start() local
4176 if (unlikely(lockup_detected(h))) in hpsa_scan_start()
4177 return hpsa_scan_complete(h); in hpsa_scan_start()
4181 spin_lock_irqsave(&h->scan_lock, flags); in hpsa_scan_start()
4182 if (h->scan_finished) in hpsa_scan_start()
4184 spin_unlock_irqrestore(&h->scan_lock, flags); in hpsa_scan_start()
4185 wait_event(h->scan_wait_queue, h->scan_finished); in hpsa_scan_start()
4192 h->scan_finished = 0; /* mark scan as in progress */ in hpsa_scan_start()
4193 spin_unlock_irqrestore(&h->scan_lock, flags); in hpsa_scan_start()
4195 if (unlikely(lockup_detected(h))) in hpsa_scan_start()
4196 return hpsa_scan_complete(h); in hpsa_scan_start()
4198 hpsa_update_scsi_devices(h, h->scsi_host->host_no); in hpsa_scan_start()
4200 hpsa_scan_complete(h); in hpsa_scan_start()
4221 struct ctlr_info *h = shost_to_hba(sh); in hpsa_scan_finished() local
4225 spin_lock_irqsave(&h->scan_lock, flags); in hpsa_scan_finished()
4226 finished = h->scan_finished; in hpsa_scan_finished()
4227 spin_unlock_irqrestore(&h->scan_lock, flags); in hpsa_scan_finished()
4231 static void hpsa_unregister_scsi(struct ctlr_info *h) in hpsa_unregister_scsi() argument
4234 scsi_remove_host(h->scsi_host); in hpsa_unregister_scsi()
4235 scsi_host_put(h->scsi_host); in hpsa_unregister_scsi()
4236 h->scsi_host = NULL; in hpsa_unregister_scsi()
4239 static int hpsa_register_scsi(struct ctlr_info *h) in hpsa_register_scsi() argument
4244 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); in hpsa_register_scsi()
4255 sh->can_queue = h->nr_cmds - in hpsa_register_scsi()
4260 sh->sg_tablesize = h->maxsgentries; in hpsa_register_scsi()
4261 h->scsi_host = sh; in hpsa_register_scsi()
4262 sh->hostdata[0] = (unsigned long) h; in hpsa_register_scsi()
4263 sh->irq = h->intr[h->intr_mode]; in hpsa_register_scsi()
4265 error = scsi_add_host(sh, &h->pdev->dev); in hpsa_register_scsi()
4272 dev_err(&h->pdev->dev, "%s: scsi_add_host" in hpsa_register_scsi()
4273 " failed for controller %d\n", __func__, h->ctlr); in hpsa_register_scsi()
4277 dev_err(&h->pdev->dev, "%s: scsi_host_alloc" in hpsa_register_scsi()
4278 " failed for controller %d\n", __func__, h->ctlr); in hpsa_register_scsi()
4282 static int wait_for_device_to_become_ready(struct ctlr_info *h, in wait_for_device_to_become_ready() argument
4290 c = cmd_alloc(h); in wait_for_device_to_become_ready()
4292 dev_warn(&h->pdev->dev, "out of memory in " in wait_for_device_to_become_ready()
4312 (void) fill_cmd(c, TEST_UNIT_READY, h, in wait_for_device_to_become_ready()
4314 hpsa_scsi_do_simple_cmd_core(h, c); in wait_for_device_to_become_ready()
4326 dev_warn(&h->pdev->dev, "waiting %d secs " in wait_for_device_to_become_ready()
4332 dev_warn(&h->pdev->dev, "giving up on device.\n"); in wait_for_device_to_become_ready()
4334 dev_warn(&h->pdev->dev, "device is ready.\n"); in wait_for_device_to_become_ready()
4336 cmd_free(h, c); in wait_for_device_to_become_ready()
4346 struct ctlr_info *h; in hpsa_eh_device_reset_handler() local
4350 h = sdev_to_hba(scsicmd->device); in hpsa_eh_device_reset_handler()
4351 if (h == NULL) /* paranoia */ in hpsa_eh_device_reset_handler()
4354 if (lockup_detected(h)) in hpsa_eh_device_reset_handler()
4359 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " in hpsa_eh_device_reset_handler()
4363 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", in hpsa_eh_device_reset_handler()
4364 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); in hpsa_eh_device_reset_handler()
4366 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN); in hpsa_eh_device_reset_handler()
4367 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) in hpsa_eh_device_reset_handler()
4370 dev_warn(&h->pdev->dev, "resetting device failed.\n"); in hpsa_eh_device_reset_handler()
4389 static void hpsa_get_tag(struct ctlr_info *h, in hpsa_get_tag() argument
4395 &h->ioaccel_cmd_pool[c->cmdindex]; in hpsa_get_tag()
4403 &h->ioaccel2_cmd_pool[c->cmdindex]; in hpsa_get_tag()
4414 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, in hpsa_send_abort() argument
4422 c = cmd_alloc(h); in hpsa_send_abort()
4424 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); in hpsa_send_abort()
4429 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort, in hpsa_send_abort()
4433 hpsa_scsi_do_simple_cmd_core(h, c); in hpsa_send_abort()
4434 hpsa_get_tag(h, abort, &taglower, &tagupper); in hpsa_send_abort()
4435 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", in hpsa_send_abort()
4447 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", in hpsa_send_abort()
4449 hpsa_scsi_interpret_error(h, c); in hpsa_send_abort()
4453 cmd_free(h, c); in hpsa_send_abort()
4454 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", in hpsa_send_abort()
4466 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, in hpsa_send_reset_as_abort_ioaccel2() argument
4479 dev_warn(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
4484 if (h->raid_offload_debug > 0) in hpsa_send_reset_as_abort_ioaccel2()
4485 dev_info(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
4487 h->scsi_host->host_no, dev->bus, dev->target, dev->lun, in hpsa_send_reset_as_abort_ioaccel2()
4492 dev_warn(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
4498 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { in hpsa_send_reset_as_abort_ioaccel2()
4499 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); in hpsa_send_reset_as_abort_ioaccel2()
4504 if (h->raid_offload_debug > 0) in hpsa_send_reset_as_abort_ioaccel2()
4505 dev_info(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
4509 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET); in hpsa_send_reset_as_abort_ioaccel2()
4511 dev_warn(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
4519 if (wait_for_device_to_become_ready(h, psa) != 0) { in hpsa_send_reset_as_abort_ioaccel2()
4520 dev_warn(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
4528 dev_info(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
4542 static int hpsa_send_abort_both_ways(struct ctlr_info *h, in hpsa_send_abort_both_ways() argument
4551 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); in hpsa_send_abort_both_ways()
4553 return hpsa_send_abort(h, scsi3addr, abort, 0) && in hpsa_send_abort_both_ways()
4554 hpsa_send_abort(h, scsi3addr, abort, 1); in hpsa_send_abort_both_ways()
4565 struct ctlr_info *h; in hpsa_eh_abort_handler() local
4575 h = sdev_to_hba(sc->device); in hpsa_eh_abort_handler()
4576 if (WARN(h == NULL, in hpsa_eh_abort_handler()
4580 if (lockup_detected(h)) in hpsa_eh_abort_handler()
4584 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && in hpsa_eh_abort_handler()
4585 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) in hpsa_eh_abort_handler()
4590 h->scsi_host->host_no, sc->device->channel, in hpsa_eh_abort_handler()
4596 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n", in hpsa_eh_abort_handler()
4609 cmd_free(h, abort); in hpsa_eh_abort_handler()
4612 hpsa_get_tag(h, abort, &taglower, &tagupper); in hpsa_eh_abort_handler()
4618 dev_dbg(&h->pdev->dev, "%s\n", msg); in hpsa_eh_abort_handler()
4619 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", in hpsa_eh_abort_handler()
4620 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); in hpsa_eh_abort_handler()
4626 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort); in hpsa_eh_abort_handler()
4628 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg); in hpsa_eh_abort_handler()
4629 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", in hpsa_eh_abort_handler()
4630 h->scsi_host->host_no, in hpsa_eh_abort_handler()
4632 cmd_free(h, abort); in hpsa_eh_abort_handler()
4635 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); in hpsa_eh_abort_handler()
4646 cmd_free(h, abort); in hpsa_eh_abort_handler()
4652 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", in hpsa_eh_abort_handler()
4654 cmd_free(h, abort); in hpsa_eh_abort_handler()
4665 static struct CommandList *cmd_alloc(struct ctlr_info *h) in cmd_alloc() argument
4686 offset = h->last_allocation; /* benignly racy */ in cmd_alloc()
4688 i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset); in cmd_alloc()
4689 if (unlikely(i == h->nr_cmds)) { in cmd_alloc()
4693 c = h->cmd_pool + i; in cmd_alloc()
4696 cmd_free(h, c); /* already in use */ in cmd_alloc()
4697 offset = (i + 1) % h->nr_cmds; in cmd_alloc()
4701 h->cmd_pool_bits + (i / BITS_PER_LONG)); in cmd_alloc()
4704 h->last_allocation = i; /* benignly racy */ in cmd_alloc()
4709 cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c); in cmd_alloc()
4710 c->err_info = h->errinfo_pool + i; in cmd_alloc()
4712 err_dma_handle = h->errinfo_pool_dhandle in cmd_alloc()
4722 c->h = h; in cmd_alloc()
4726 static void cmd_free(struct ctlr_info *h, struct CommandList *c) in cmd_free() argument
4731 i = c - h->cmd_pool; in cmd_free()
4733 h->cmd_pool_bits + (i / BITS_PER_LONG)); in cmd_free()
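cmd_alloc()/cmd_free() above hand out commands from a fixed pool: a bitmap scanned from a "benignly racy" rotor (last_allocation) yields a candidate slot, and a per-command reference count is the authoritative ownership test, so a CPU that loses the race simply drops its reference and tries the next slot. A simplified userspace sketch with C11 atomics, a single bitmap word, and invented sizes.

/* Sketch of the bitmap-plus-refcount command pool behind cmd_alloc(). */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CMDS 8

struct command {
        atomic_int refcount;
        int index;
};

static struct command cmd_pool[NR_CMDS];
static unsigned long cmd_pool_bits;     /* bit i set => slot i in use (hint) */
static unsigned int last_allocation;    /* benignly racy rotor */

static struct command *cmd_alloc(void)
{
        unsigned int offset = last_allocation, i, tries;

        for (tries = 0; tries < 2 * NR_CMDS; tries++) {
                i = (offset + tries) % NR_CMDS;
                if (cmd_pool_bits & (1UL << i))
                        continue;       /* hint says busy */
                /* The refcount, not the bitmap, is the authoritative test. */
                if (atomic_fetch_add(&cmd_pool[i].refcount, 1) != 0) {
                        atomic_fetch_sub(&cmd_pool[i].refcount, 1);
                        continue;       /* lost the race: already in use */
                }
                cmd_pool_bits |= 1UL << i;
                last_allocation = i;
                cmd_pool[i].index = i;
                return &cmd_pool[i];
        }
        return NULL;    /* pool exhausted */
}

static void cmd_free(struct command *c)
{
        if (atomic_fetch_sub(&c->refcount, 1) == 1)
                cmd_pool_bits &= ~(1UL << c->index);
}

int main(void)
{
        struct command *a = cmd_alloc(), *b = cmd_alloc();

        printf("got slots %d and %d\n", a->index, b->index);
        cmd_free(a);
        cmd_free(b);
        return 0;
}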
4844 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) in hpsa_getpciinfo_ioctl() argument
4850 pciinfo.domain = pci_domain_nr(h->pdev->bus); in hpsa_getpciinfo_ioctl()
4851 pciinfo.bus = h->pdev->bus->number; in hpsa_getpciinfo_ioctl()
4852 pciinfo.dev_fn = h->pdev->devfn; in hpsa_getpciinfo_ioctl()
4853 pciinfo.board_id = h->board_id; in hpsa_getpciinfo_ioctl()
4859 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) in hpsa_getdrivver_ioctl() argument
4868 dev_info(&h->pdev->dev, "driver version string '%s' " in hpsa_getdrivver_ioctl()
4882 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) in hpsa_passthru_ioctl() argument
4915 c = cmd_alloc(h); in hpsa_passthru_ioctl()
4939 temp64 = pci_map_single(h->pdev, buff, in hpsa_passthru_ioctl()
4941 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { in hpsa_passthru_ioctl()
4951 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); in hpsa_passthru_ioctl()
4953 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); in hpsa_passthru_ioctl()
4954 check_ioctl_unit_attention(h, c); in hpsa_passthru_ioctl()
4972 cmd_free(h, c); in hpsa_passthru_ioctl()
4978 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) in hpsa_big_passthru_ioctl() argument
5050 c = cmd_alloc(h); in hpsa_big_passthru_ioctl()
5064 temp64 = pci_map_single(h->pdev, buff[i], in hpsa_big_passthru_ioctl()
5066 if (dma_mapping_error(&h->pdev->dev, in hpsa_big_passthru_ioctl()
5070 hpsa_pci_unmap(h->pdev, c, i, in hpsa_big_passthru_ioctl()
5081 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); in hpsa_big_passthru_ioctl()
5083 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); in hpsa_big_passthru_ioctl()
5084 check_ioctl_unit_attention(h, c); in hpsa_big_passthru_ioctl()
5106 cmd_free(h, c); in hpsa_big_passthru_ioctl()
5120 static void check_ioctl_unit_attention(struct ctlr_info *h, in check_ioctl_unit_attention() argument
5125 (void) check_for_unit_attention(h, c); in check_ioctl_unit_attention()
5133 struct ctlr_info *h; in hpsa_ioctl() local
5137 h = sdev_to_hba(dev); in hpsa_ioctl()
5143 hpsa_scan_start(h->scsi_host); in hpsa_ioctl()
5146 return hpsa_getpciinfo_ioctl(h, argp); in hpsa_ioctl()
5148 return hpsa_getdrivver_ioctl(h, argp); in hpsa_ioctl()
5150 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) in hpsa_ioctl()
5152 rc = hpsa_passthru_ioctl(h, argp); in hpsa_ioctl()
5153 atomic_inc(&h->passthru_cmds_avail); in hpsa_ioctl()
5156 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) in hpsa_ioctl()
5158 rc = hpsa_big_passthru_ioctl(h, argp); in hpsa_ioctl()
5159 atomic_inc(&h->passthru_cmds_avail); in hpsa_ioctl()
5166 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, in hpsa_send_host_reset() argument
5171 c = cmd_alloc(h); in hpsa_send_host_reset()
5175 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, in hpsa_send_host_reset()
5179 enqueue_cmd_and_start_io(h, c); in hpsa_send_host_reset()
5187 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, in fill_cmd() argument
5285 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd); in fill_cmd()
5309 dev_dbg(&h->pdev->dev, in fill_cmd()
5330 dev_warn(&h->pdev->dev, "unknown message type %d\n", in fill_cmd()
5335 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); in fill_cmd()
5352 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) in fill_cmd()
5370 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) in get_next_completion() argument
5372 return h->access.command_completed(h, q); in get_next_completion()
5375 static inline bool interrupt_pending(struct ctlr_info *h) in interrupt_pending() argument
5377 return h->access.intr_pending(h); in interrupt_pending()
5380 static inline long interrupt_not_for_us(struct ctlr_info *h) in interrupt_not_for_us() argument
5382 return (h->access.intr_pending(h) == 0) || in interrupt_not_for_us()
5383 (h->interrupts_enabled == 0); in interrupt_not_for_us()
5386 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, in bad_tag() argument
5389 if (unlikely(tag_index >= h->nr_cmds)) { in bad_tag()
5390 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); in bad_tag()
5398 dial_up_lockup_detection_on_fw_flash_complete(c->h, c); in finish_cmd()
5407 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) in hpsa_tag_discard_error_bits() argument
5411 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) in hpsa_tag_discard_error_bits()
5417 static inline void process_indexed_cmd(struct ctlr_info *h, in process_indexed_cmd() argument
5424 if (!bad_tag(h, tag_index, raw_tag)) { in process_indexed_cmd()
5425 c = h->cmd_pool + tag_index; in process_indexed_cmd()
5435 static int ignore_bogus_interrupt(struct ctlr_info *h) in ignore_bogus_interrupt() argument
5440 if (likely(h->interrupts_enabled)) in ignore_bogus_interrupt()
5443 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " in ignore_bogus_interrupt()
5461 struct ctlr_info *h = queue_to_hba(queue); in hpsa_intx_discard_completions() local
5465 if (ignore_bogus_interrupt(h)) in hpsa_intx_discard_completions()
5468 if (interrupt_not_for_us(h)) in hpsa_intx_discard_completions()
5470 h->last_intr_timestamp = get_jiffies_64(); in hpsa_intx_discard_completions()
5471 while (interrupt_pending(h)) { in hpsa_intx_discard_completions()
5472 raw_tag = get_next_completion(h, q); in hpsa_intx_discard_completions()
5474 raw_tag = next_command(h, q); in hpsa_intx_discard_completions()
5481 struct ctlr_info *h = queue_to_hba(queue); in hpsa_msix_discard_completions() local
5485 if (ignore_bogus_interrupt(h)) in hpsa_msix_discard_completions()
5488 h->last_intr_timestamp = get_jiffies_64(); in hpsa_msix_discard_completions()
5489 raw_tag = get_next_completion(h, q); in hpsa_msix_discard_completions()
5491 raw_tag = next_command(h, q); in hpsa_msix_discard_completions()
5497 struct ctlr_info *h = queue_to_hba((u8 *) queue); in do_hpsa_intr_intx() local
5501 if (interrupt_not_for_us(h)) in do_hpsa_intr_intx()
5503 h->last_intr_timestamp = get_jiffies_64(); in do_hpsa_intr_intx()
5504 while (interrupt_pending(h)) { in do_hpsa_intr_intx()
5505 raw_tag = get_next_completion(h, q); in do_hpsa_intr_intx()
5507 process_indexed_cmd(h, raw_tag); in do_hpsa_intr_intx()
5508 raw_tag = next_command(h, q); in do_hpsa_intr_intx()
5516 struct ctlr_info *h = queue_to_hba(queue); in do_hpsa_intr_msi() local
5520 h->last_intr_timestamp = get_jiffies_64(); in do_hpsa_intr_msi()
5521 raw_tag = get_next_completion(h, q); in do_hpsa_intr_msi()
5523 process_indexed_cmd(h, raw_tag); in do_hpsa_intr_msi()
5524 raw_tag = next_command(h, q); in do_hpsa_intr_msi()
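The loop condition of the MSI/MSI-X handler contains no reference to h and is therefore absent from this listing; consolidated, the handler drains completed tags until the reply queue reports FIFO_EMPTY. A sketch, assuming kernel context:

        h->last_intr_timestamp = get_jiffies_64();
        raw_tag = get_next_completion(h, q);
        while (raw_tag != FIFO_EMPTY) {         /* queue-exhausted sentinel */
                process_indexed_cmd(h, raw_tag);
                raw_tag = next_command(h, q);   /* pop and ack the entry */
        }
        return IRQ_HANDLED;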
5937 static void hpsa_interrupt_mode(struct ctlr_info *h) in hpsa_interrupt_mode() argument
5949 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || in hpsa_interrupt_mode()
5950 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) in hpsa_interrupt_mode()
5952 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { in hpsa_interrupt_mode()
5953 dev_info(&h->pdev->dev, "MSI-X capable controller\n"); in hpsa_interrupt_mode()
5954 h->msix_vector = MAX_REPLY_QUEUES; in hpsa_interrupt_mode()
5955 if (h->msix_vector > num_online_cpus()) in hpsa_interrupt_mode()
5956 h->msix_vector = num_online_cpus(); in hpsa_interrupt_mode()
5957 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries, in hpsa_interrupt_mode()
5958 1, h->msix_vector); in hpsa_interrupt_mode()
5960 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); in hpsa_interrupt_mode()
5961 h->msix_vector = 0; in hpsa_interrupt_mode()
5963 } else if (err < h->msix_vector) { in hpsa_interrupt_mode()
5964 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " in hpsa_interrupt_mode()
5967 h->msix_vector = err; in hpsa_interrupt_mode()
5968 for (i = 0; i < h->msix_vector; i++) in hpsa_interrupt_mode()
5969 h->intr[i] = hpsa_msix_entries[i].vector; in hpsa_interrupt_mode()
5973 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { in hpsa_interrupt_mode()
5974 dev_info(&h->pdev->dev, "MSI capable controller\n"); in hpsa_interrupt_mode()
5975 if (!pci_enable_msi(h->pdev)) in hpsa_interrupt_mode()
5976 h->msi_vector = 1; in hpsa_interrupt_mode()
5978 dev_warn(&h->pdev->dev, "MSI init failed\n"); in hpsa_interrupt_mode()
5983 h->intr[h->intr_mode] = h->pdev->irq; in hpsa_interrupt_mode()
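hpsa_interrupt_mode() negotiates downwards: MSI-X with up to one vector per reply queue, plain MSI next, legacy INTx last. A condensed sketch of the negotiation, assuming kernel context; error handling is abbreviated:

        int want = min_t(int, MAX_REPLY_QUEUES, num_online_cpus());
        int got = pci_enable_msix_range(h->pdev, hpsa_msix_entries, 1, want);

        if (got > 0) {
                h->msix_vector = got;   /* may be fewer than requested */
                for (i = 0; i < got; i++)
                        h->intr[i] = hpsa_msix_entries[i].vector;
        } else if (!pci_enable_msi(h->pdev)) {
                h->msi_vector = 1;      /* single shared MSI vector */
        }
        /* either way, h->intr[h->intr_mode] = h->pdev->irq covers MSI/INTx */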
6067 static int hpsa_find_cfgtables(struct ctlr_info *h) in hpsa_find_cfgtables() argument
6075 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, in hpsa_find_cfgtables()
6079 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, in hpsa_find_cfgtables()
6080 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); in hpsa_find_cfgtables()
6081 if (!h->cfgtable) { in hpsa_find_cfgtables()
6082 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); in hpsa_find_cfgtables()
6085 rc = write_driver_ver_to_cfgtable(h->cfgtable); in hpsa_find_cfgtables()
6089 trans_offset = readl(&h->cfgtable->TransMethodOffset); in hpsa_find_cfgtables()
6090 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, in hpsa_find_cfgtables()
6092 sizeof(*h->transtable)); in hpsa_find_cfgtables()
6093 if (!h->transtable) in hpsa_find_cfgtables()
6098 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) in hpsa_get_max_perf_mode_cmds() argument
6100 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); in hpsa_get_max_perf_mode_cmds()
6103 if (reset_devices && h->max_commands > 32) in hpsa_get_max_perf_mode_cmds()
6104 h->max_commands = 32; in hpsa_get_max_perf_mode_cmds()
6106 if (h->max_commands < 16) { in hpsa_get_max_perf_mode_cmds()
6107 dev_warn(&h->pdev->dev, "Controller reports " in hpsa_get_max_perf_mode_cmds()
6110 h->max_commands); in hpsa_get_max_perf_mode_cmds()
6111 h->max_commands = 16; in hpsa_get_max_perf_mode_cmds()
6119 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) in hpsa_supports_chained_sg_blocks() argument
6121 return h->maxsgentries > 512; in hpsa_supports_chained_sg_blocks()
6128 static void hpsa_find_board_params(struct ctlr_info *h) in hpsa_find_board_params() argument
6130 hpsa_get_max_perf_mode_cmds(h); in hpsa_find_board_params()
6131 h->nr_cmds = h->max_commands; in hpsa_find_board_params()
6132 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); in hpsa_find_board_params()
6133 h->fw_support = readl(&(h->cfgtable->misc_fw_support)); in hpsa_find_board_params()
6134 if (hpsa_supports_chained_sg_blocks(h)) { in hpsa_find_board_params()
6136 h->max_cmd_sg_entries = 32; in hpsa_find_board_params()
6137 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; in hpsa_find_board_params()
6138 h->maxsgentries--; /* save one for chain pointer */ in hpsa_find_board_params()
6145 h->max_cmd_sg_entries = 31; in hpsa_find_board_params()
6146 h->maxsgentries = 31; /* default to traditional values */ in hpsa_find_board_params()
6147 h->chainsize = 0; in hpsa_find_board_params()
6151 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); in hpsa_find_board_params()
6152 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) in hpsa_find_board_params()
6153 dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); in hpsa_find_board_params()
6154 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) in hpsa_find_board_params()
6155 dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); in hpsa_find_board_params()
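The scatter-gather budget above splits the firmware-advertised maxsgentries between the command itself and an external chain block; for example, a board reporting 2048 entries keeps 32 embedded (one of which becomes the chain pointer, hence the maxsgentries-- leaving 2047 usable) plus a 2016-entry chain. A condensed sketch:

        if (h->maxsgentries > 512) {            /* chained SG blocks supported */
                h->max_cmd_sg_entries = 32;     /* embedded in the command */
                h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
                h->maxsgentries--;              /* one slot is the chain pointer */
        } else {
                h->max_cmd_sg_entries = 31;     /* pre-chaining controllers */
                h->maxsgentries = 31;           /* more would lock up the board */
                h->chainsize = 0;
        }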
6158 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) in hpsa_CISS_signature_present() argument
6160 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { in hpsa_CISS_signature_present()
6161 dev_err(&h->pdev->dev, "not a valid CISS config table\n"); in hpsa_CISS_signature_present()
6167 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) in hpsa_set_driver_support_bits() argument
6171 driver_support = readl(&(h->cfgtable->driver_support)); in hpsa_set_driver_support_bits()
6177 writel(driver_support, &(h->cfgtable->driver_support)); in hpsa_set_driver_support_bits()
6183 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) in hpsa_p600_dma_prefetch_quirk() argument
6187 if (h->board_id != 0x3225103C) in hpsa_p600_dma_prefetch_quirk()
6189 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); in hpsa_p600_dma_prefetch_quirk()
6191 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); in hpsa_p600_dma_prefetch_quirk()
6194 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) in hpsa_wait_for_clear_event_notify_ack() argument
6201 spin_lock_irqsave(&h->lock, flags); in hpsa_wait_for_clear_event_notify_ack()
6202 doorbell_value = readl(h->vaddr + SA5_DOORBELL); in hpsa_wait_for_clear_event_notify_ack()
6203 spin_unlock_irqrestore(&h->lock, flags); in hpsa_wait_for_clear_event_notify_ack()
6214 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) in hpsa_wait_for_mode_change_ack() argument
6225 spin_lock_irqsave(&h->lock, flags); in hpsa_wait_for_mode_change_ack()
6226 doorbell_value = readl(h->vaddr + SA5_DOORBELL); in hpsa_wait_for_mode_change_ack()
6227 spin_unlock_irqrestore(&h->lock, flags); in hpsa_wait_for_mode_change_ack()
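Both ack-wait helpers poll the SA5 doorbell under h->lock until firmware clears the request bit it was handed. A sketch of the handshake; the retry budget and poll interval below are illustrative placeholders, not guaranteed to match the driver's constants:

        for (i = 0; i < 30000; i++) {           /* illustrative retry budget */
                spin_lock_irqsave(&h->lock, flags);
                doorbell_value = readl(h->vaddr + SA5_DOORBELL);
                spin_unlock_irqrestore(&h->lock, flags);
                if (!(doorbell_value & CFGTBL_ChangeReq))
                        return 0;               /* firmware absorbed the change */
                usleep_range(10000, 20000);     /* illustrative poll interval */
        }
        return -ENODEV;                         /* firmware never acked */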
6239 static int hpsa_enter_simple_mode(struct ctlr_info *h) in hpsa_enter_simple_mode() argument
6243 trans_support = readl(&(h->cfgtable->TransportSupport)); in hpsa_enter_simple_mode()
6247 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); in hpsa_enter_simple_mode()
6250 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); in hpsa_enter_simple_mode()
6251 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); in hpsa_enter_simple_mode()
6252 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); in hpsa_enter_simple_mode()
6253 if (hpsa_wait_for_mode_change_ack(h)) in hpsa_enter_simple_mode()
6255 print_cfg_table(&h->pdev->dev, h->cfgtable); in hpsa_enter_simple_mode()
6256 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) in hpsa_enter_simple_mode()
6258 h->transMethod = CFGTBL_Trans_Simple; in hpsa_enter_simple_mode()
6261 dev_err(&h->pdev->dev, "failed to enter simple mode\n"); in hpsa_enter_simple_mode()
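Entering simple mode is a four-step handshake: request the transport in HostWrite, ring the config-change doorbell, wait for the ack, then verify the switch actually happened by re-reading TransportActive. Condensed sketch, assuming kernel context:

        writel(CFGTBL_Trans_Simple, &h->cfgtable->HostWrite.TransportRequest);
        writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
        writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
        if (hpsa_wait_for_mode_change_ack(h))
                goto error;     /* doorbell bit never cleared */
        if (!(readl(&h->cfgtable->TransportActive) & CFGTBL_Trans_Simple))
                goto error;     /* firmware refused the transport */
        h->transMethod = CFGTBL_Trans_Simple;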
6265 static int hpsa_pci_init(struct ctlr_info *h) in hpsa_pci_init() argument
6269 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); in hpsa_pci_init()
6272 h->product_name = products[prod_index].product_name; in hpsa_pci_init()
6273 h->access = *(products[prod_index].access); in hpsa_pci_init()
6275 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | in hpsa_pci_init()
6278 err = pci_enable_device(h->pdev); in hpsa_pci_init()
6280 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); in hpsa_pci_init()
6284 err = pci_request_regions(h->pdev, HPSA); in hpsa_pci_init()
6286 dev_err(&h->pdev->dev, in hpsa_pci_init()
6291 pci_set_master(h->pdev); in hpsa_pci_init()
6293 hpsa_interrupt_mode(h); in hpsa_pci_init()
6294 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); in hpsa_pci_init()
6297 h->vaddr = remap_pci_mem(h->paddr, 0x250); in hpsa_pci_init()
6298 if (!h->vaddr) { in hpsa_pci_init()
6302 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); in hpsa_pci_init()
6305 err = hpsa_find_cfgtables(h); in hpsa_pci_init()
6308 hpsa_find_board_params(h); in hpsa_pci_init()
6310 if (!hpsa_CISS_signature_present(h)) { in hpsa_pci_init()
6314 hpsa_set_driver_support_bits(h); in hpsa_pci_init()
6315 hpsa_p600_dma_prefetch_quirk(h); in hpsa_pci_init()
6316 err = hpsa_enter_simple_mode(h); in hpsa_pci_init()
6322 if (h->transtable) in hpsa_pci_init()
6323 iounmap(h->transtable); in hpsa_pci_init()
6324 if (h->cfgtable) in hpsa_pci_init()
6325 iounmap(h->cfgtable); in hpsa_pci_init()
6326 if (h->vaddr) in hpsa_pci_init()
6327 iounmap(h->vaddr); in hpsa_pci_init()
6328 pci_disable_device(h->pdev); in hpsa_pci_init()
6329 pci_release_regions(h->pdev); in hpsa_pci_init()
6333 static void hpsa_hba_inquiry(struct ctlr_info *h) in hpsa_hba_inquiry() argument
6338 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); in hpsa_hba_inquiry()
6339 if (!h->hba_inquiry_data) in hpsa_hba_inquiry()
6341 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, in hpsa_hba_inquiry()
6342 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); in hpsa_hba_inquiry()
6344 kfree(h->hba_inquiry_data); in hpsa_hba_inquiry()
6345 h->hba_inquiry_data = NULL; in hpsa_hba_inquiry()
6411 static int hpsa_allocate_cmd_pool(struct ctlr_info *h) in hpsa_allocate_cmd_pool() argument
6413 h->cmd_pool_bits = kzalloc( in hpsa_allocate_cmd_pool()
6414 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * in hpsa_allocate_cmd_pool()
6416 h->cmd_pool = pci_alloc_consistent(h->pdev, in hpsa_allocate_cmd_pool()
6417 h->nr_cmds * sizeof(*h->cmd_pool), in hpsa_allocate_cmd_pool()
6418 &(h->cmd_pool_dhandle)); in hpsa_allocate_cmd_pool()
6419 h->errinfo_pool = pci_alloc_consistent(h->pdev, in hpsa_allocate_cmd_pool()
6420 h->nr_cmds * sizeof(*h->errinfo_pool), in hpsa_allocate_cmd_pool()
6421 &(h->errinfo_pool_dhandle)); in hpsa_allocate_cmd_pool()
6422 if ((h->cmd_pool_bits == NULL) in hpsa_allocate_cmd_pool()
6423 || (h->cmd_pool == NULL) in hpsa_allocate_cmd_pool()
6424 || (h->errinfo_pool == NULL)) { in hpsa_allocate_cmd_pool()
6425 dev_err(&h->pdev->dev, "out of memory in %s", __func__); in hpsa_allocate_cmd_pool()
6430 hpsa_free_cmd_pool(h); in hpsa_allocate_cmd_pool()
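hpsa_allocate_cmd_pool() deliberately attempts all three allocations before checking any of them: hpsa_free_cmd_pool() tolerates NULL members, so one cleanup path covers every partial-failure combination. Condensed sketch:

        h->cmd_pool_bits = kzalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
                        sizeof(unsigned long), GFP_KERNEL);
        h->cmd_pool = pci_alloc_consistent(h->pdev,
                        h->nr_cmds * sizeof(*h->cmd_pool),
                        &h->cmd_pool_dhandle);
        h->errinfo_pool = pci_alloc_consistent(h->pdev,
                        h->nr_cmds * sizeof(*h->errinfo_pool),
                        &h->errinfo_pool_dhandle);
        if (!h->cmd_pool_bits || !h->cmd_pool || !h->errinfo_pool) {
                hpsa_free_cmd_pool(h);  /* safe on partial allocation */
                return -ENOMEM;
        }
        return 0;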
6434 static void hpsa_free_cmd_pool(struct ctlr_info *h) in hpsa_free_cmd_pool() argument
6436 kfree(h->cmd_pool_bits); in hpsa_free_cmd_pool()
6437 if (h->cmd_pool) in hpsa_free_cmd_pool()
6438 pci_free_consistent(h->pdev, in hpsa_free_cmd_pool()
6439 h->nr_cmds * sizeof(struct CommandList), in hpsa_free_cmd_pool()
6440 h->cmd_pool, h->cmd_pool_dhandle); in hpsa_free_cmd_pool()
6441 if (h->ioaccel2_cmd_pool) in hpsa_free_cmd_pool()
6442 pci_free_consistent(h->pdev, in hpsa_free_cmd_pool()
6443 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), in hpsa_free_cmd_pool()
6444 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); in hpsa_free_cmd_pool()
6445 if (h->errinfo_pool) in hpsa_free_cmd_pool()
6446 pci_free_consistent(h->pdev, in hpsa_free_cmd_pool()
6447 h->nr_cmds * sizeof(struct ErrorInfo), in hpsa_free_cmd_pool()
6448 h->errinfo_pool, in hpsa_free_cmd_pool()
6449 h->errinfo_pool_dhandle); in hpsa_free_cmd_pool()
6450 if (h->ioaccel_cmd_pool) in hpsa_free_cmd_pool()
6451 pci_free_consistent(h->pdev, in hpsa_free_cmd_pool()
6452 h->nr_cmds * sizeof(struct io_accel1_cmd), in hpsa_free_cmd_pool()
6453 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); in hpsa_free_cmd_pool()
6456 static void hpsa_irq_affinity_hints(struct ctlr_info *h) in hpsa_irq_affinity_hints() argument
6461 for (i = 0; i < h->msix_vector; i++) { in hpsa_irq_affinity_hints()
6462 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); in hpsa_irq_affinity_hints()
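The affinity loop pins vector i to the i-th online CPU; the advancing cpumask_next() call contains no reference to h and so is missing from this listing. Consolidated sketch (no wraparound is needed, since msix_vector was already capped at num_online_cpus()):

        cpu = cpumask_first(cpu_online_mask);
        for (i = 0; i < h->msix_vector; i++) {
                irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
                cpu = cpumask_next(cpu, cpu_online_mask);
        }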
6468 static void hpsa_free_irqs(struct ctlr_info *h) in hpsa_free_irqs() argument
6472 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { in hpsa_free_irqs()
6474 i = h->intr_mode; in hpsa_free_irqs()
6475 irq_set_affinity_hint(h->intr[i], NULL); in hpsa_free_irqs()
6476 free_irq(h->intr[i], &h->q[i]); in hpsa_free_irqs()
6480 for (i = 0; i < h->msix_vector; i++) { in hpsa_free_irqs()
6481 irq_set_affinity_hint(h->intr[i], NULL); in hpsa_free_irqs()
6482 free_irq(h->intr[i], &h->q[i]); in hpsa_free_irqs()
6485 h->q[i] = 0; in hpsa_free_irqs()
6489 static int hpsa_request_irqs(struct ctlr_info *h, in hpsa_request_irqs() argument
6500 h->q[i] = (u8) i; in hpsa_request_irqs()
6502 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { in hpsa_request_irqs()
6504 for (i = 0; i < h->msix_vector; i++) { in hpsa_request_irqs()
6505 rc = request_irq(h->intr[i], msixhandler, in hpsa_request_irqs()
6506 0, h->devname, in hpsa_request_irqs()
6507 &h->q[i]); in hpsa_request_irqs()
6511 dev_err(&h->pdev->dev, in hpsa_request_irqs()
6513 h->intr[i], h->devname); in hpsa_request_irqs()
6515 free_irq(h->intr[j], &h->q[j]); in hpsa_request_irqs()
6516 h->q[j] = 0; in hpsa_request_irqs()
6519 h->q[j] = 0; in hpsa_request_irqs()
6523 hpsa_irq_affinity_hints(h); in hpsa_request_irqs()
6526 if (h->msix_vector > 0 || h->msi_vector) { in hpsa_request_irqs()
6527 rc = request_irq(h->intr[h->intr_mode], in hpsa_request_irqs()
6528 msixhandler, 0, h->devname, in hpsa_request_irqs()
6529 &h->q[h->intr_mode]); in hpsa_request_irqs()
6531 rc = request_irq(h->intr[h->intr_mode], in hpsa_request_irqs()
6532 intxhandler, IRQF_SHARED, h->devname, in hpsa_request_irqs()
6533 &h->q[h->intr_mode]); in hpsa_request_irqs()
6537 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", in hpsa_request_irqs()
6538 h->intr[h->intr_mode], h->devname); in hpsa_request_irqs()
6544 static int hpsa_kdump_soft_reset(struct ctlr_info *h) in hpsa_kdump_soft_reset() argument
6546 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, in hpsa_kdump_soft_reset()
6548 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); in hpsa_kdump_soft_reset()
6552 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); in hpsa_kdump_soft_reset()
6553 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { in hpsa_kdump_soft_reset()
6554 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); in hpsa_kdump_soft_reset()
6558 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); in hpsa_kdump_soft_reset()
6559 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { in hpsa_kdump_soft_reset()
6560 dev_warn(&h->pdev->dev, "Board failed to become ready " in hpsa_kdump_soft_reset()
6568 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) in hpsa_free_irqs_and_disable_msix() argument
6570 hpsa_free_irqs(h); in hpsa_free_irqs_and_disable_msix()
6572 if (h->msix_vector) { in hpsa_free_irqs_and_disable_msix()
6573 if (h->pdev->msix_enabled) in hpsa_free_irqs_and_disable_msix()
6574 pci_disable_msix(h->pdev); in hpsa_free_irqs_and_disable_msix()
6575 } else if (h->msi_vector) { in hpsa_free_irqs_and_disable_msix()
6576 if (h->pdev->msi_enabled) in hpsa_free_irqs_and_disable_msix()
6577 pci_disable_msi(h->pdev); in hpsa_free_irqs_and_disable_msix()
6582 static void hpsa_free_reply_queues(struct ctlr_info *h) in hpsa_free_reply_queues() argument
6586 for (i = 0; i < h->nreply_queues; i++) { in hpsa_free_reply_queues()
6587 if (!h->reply_queue[i].head) in hpsa_free_reply_queues()
6589 pci_free_consistent(h->pdev, h->reply_queue_size, in hpsa_free_reply_queues()
6590 h->reply_queue[i].head, h->reply_queue[i].busaddr); in hpsa_free_reply_queues()
6591 h->reply_queue[i].head = NULL; in hpsa_free_reply_queues()
6592 h->reply_queue[i].busaddr = 0; in hpsa_free_reply_queues()
6596 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) in hpsa_undo_allocations_after_kdump_soft_reset() argument
6598 hpsa_free_irqs_and_disable_msix(h); in hpsa_undo_allocations_after_kdump_soft_reset()
6599 hpsa_free_sg_chain_blocks(h); in hpsa_undo_allocations_after_kdump_soft_reset()
6600 hpsa_free_cmd_pool(h); in hpsa_undo_allocations_after_kdump_soft_reset()
6601 kfree(h->ioaccel1_blockFetchTable); in hpsa_undo_allocations_after_kdump_soft_reset()
6602 kfree(h->blockFetchTable); in hpsa_undo_allocations_after_kdump_soft_reset()
6603 hpsa_free_reply_queues(h); in hpsa_undo_allocations_after_kdump_soft_reset()
6604 if (h->vaddr) in hpsa_undo_allocations_after_kdump_soft_reset()
6605 iounmap(h->vaddr); in hpsa_undo_allocations_after_kdump_soft_reset()
6606 if (h->transtable) in hpsa_undo_allocations_after_kdump_soft_reset()
6607 iounmap(h->transtable); in hpsa_undo_allocations_after_kdump_soft_reset()
6608 if (h->cfgtable) in hpsa_undo_allocations_after_kdump_soft_reset()
6609 iounmap(h->cfgtable); in hpsa_undo_allocations_after_kdump_soft_reset()
6610 pci_disable_device(h->pdev); in hpsa_undo_allocations_after_kdump_soft_reset()
6611 pci_release_regions(h->pdev); in hpsa_undo_allocations_after_kdump_soft_reset()
6612 kfree(h); in hpsa_undo_allocations_after_kdump_soft_reset()
6616 static void fail_all_outstanding_cmds(struct ctlr_info *h) in fail_all_outstanding_cmds() argument
6621 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ in fail_all_outstanding_cmds()
6622 for (i = 0; i < h->nr_cmds; i++) { in fail_all_outstanding_cmds()
6623 c = h->cmd_pool + i; in fail_all_outstanding_cmds()
6629 cmd_free(h, c); in fail_all_outstanding_cmds()
6633 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) in set_lockup_detected_for_all_cpus() argument
6639 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); in set_lockup_detected_for_all_cpus()
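The lockup flag is a per-CPU u32 so the hot I/O paths can read a local copy without bouncing a shared cache line; setting it therefore has to walk every CPU. Consolidated sketch, assuming kernel context:

        for_each_online_cpu(cpu) {
                u32 *lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
                *lockup_detected = value;
        }
        wmb();  /* make sure the per-cpu copies reach memory */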
6645 static void controller_lockup_detected(struct ctlr_info *h) in controller_lockup_detected() argument
6650 h->access.set_intr_mask(h, HPSA_INTR_OFF); in controller_lockup_detected()
6651 spin_lock_irqsave(&h->lock, flags); in controller_lockup_detected()
6652 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); in controller_lockup_detected()
6655 dev_warn(&h->pdev->dev, in controller_lockup_detected()
6659 set_lockup_detected_for_all_cpus(h, lockup_detected); in controller_lockup_detected()
6660 spin_unlock_irqrestore(&h->lock, flags); in controller_lockup_detected()
6661 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", in controller_lockup_detected()
6663 pci_disable_device(h->pdev); in controller_lockup_detected()
6664 fail_all_outstanding_cmds(h); in controller_lockup_detected()
6667 static void detect_controller_lockup(struct ctlr_info *h) in detect_controller_lockup() argument
6675 if (time_after64(h->last_intr_timestamp + in detect_controller_lockup()
6676 (h->heartbeat_sample_interval), now)) in detect_controller_lockup()
6684 if (time_after64(h->last_heartbeat_timestamp + in detect_controller_lockup()
6685 (h->heartbeat_sample_interval), now)) in detect_controller_lockup()
6689 spin_lock_irqsave(&h->lock, flags); in detect_controller_lockup()
6690 heartbeat = readl(&h->cfgtable->HeartBeat); in detect_controller_lockup()
6691 spin_unlock_irqrestore(&h->lock, flags); in detect_controller_lockup()
6692 if (h->last_heartbeat == heartbeat) { in detect_controller_lockup()
6693 controller_lockup_detected(h); in detect_controller_lockup()
6698 h->last_heartbeat = heartbeat; in detect_controller_lockup()
6699 h->last_heartbeat_timestamp = now; in detect_controller_lockup()
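Lockup detection is two-stage: recent interrupt activity or a recent heartbeat sample short-circuits the check; otherwise the firmware HeartBeat counter is compared with the last snapshot, and a stale value declares the controller dead. Condensed sketch:

        now = get_jiffies_64();
        if (time_after64(h->last_heartbeat_timestamp +
                        h->heartbeat_sample_interval, now))
                return;                 /* sampled too recently to judge */
        heartbeat = readl(&h->cfgtable->HeartBeat);
        if (h->last_heartbeat == heartbeat) {
                controller_lockup_detected(h);  /* counter stopped moving */
                return;
        }
        h->last_heartbeat = heartbeat;          /* still alive; re-snapshot */
        h->last_heartbeat_timestamp = now;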
6702 static void hpsa_ack_ctlr_events(struct ctlr_info *h) in hpsa_ack_ctlr_events() argument
6707 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) in hpsa_ack_ctlr_events()
6711 if ((h->transMethod & (CFGTBL_Trans_io_accel1 in hpsa_ack_ctlr_events()
6713 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || in hpsa_ack_ctlr_events()
6714 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { in hpsa_ack_ctlr_events()
6716 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) in hpsa_ack_ctlr_events()
6718 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) in hpsa_ack_ctlr_events()
6721 scsi_block_requests(h->scsi_host); in hpsa_ack_ctlr_events()
6722 for (i = 0; i < h->ndevices; i++) in hpsa_ack_ctlr_events()
6723 h->dev[i]->offload_enabled = 0; in hpsa_ack_ctlr_events()
6724 hpsa_drain_accel_commands(h); in hpsa_ack_ctlr_events()
6726 dev_warn(&h->pdev->dev, in hpsa_ack_ctlr_events()
6728 h->events, event_type); in hpsa_ack_ctlr_events()
6729 writel(h->events, &(h->cfgtable->clear_event_notify)); in hpsa_ack_ctlr_events()
6731 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); in hpsa_ack_ctlr_events()
6733 hpsa_wait_for_clear_event_notify_ack(h); in hpsa_ack_ctlr_events()
6734 scsi_unblock_requests(h->scsi_host); in hpsa_ack_ctlr_events()
6737 writel(h->events, &(h->cfgtable->clear_event_notify)); in hpsa_ack_ctlr_events()
6738 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); in hpsa_ack_ctlr_events()
6739 hpsa_wait_for_clear_event_notify_ack(h); in hpsa_ack_ctlr_events()
6741 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); in hpsa_ack_ctlr_events()
6742 hpsa_wait_for_mode_change_ack(h); in hpsa_ack_ctlr_events()
6753 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) in hpsa_ctlr_needs_rescan() argument
6755 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) in hpsa_ctlr_needs_rescan()
6758 h->events = readl(&(h->cfgtable->event_notify)); in hpsa_ctlr_needs_rescan()
6759 return h->events & RESCAN_REQUIRED_EVENT_BITS; in hpsa_ctlr_needs_rescan()
6765 static int hpsa_offline_devices_ready(struct ctlr_info *h) in hpsa_offline_devices_ready() argument
6771 spin_lock_irqsave(&h->offline_device_lock, flags); in hpsa_offline_devices_ready()
6772 list_for_each_safe(this, tmp, &h->offline_device_list) { in hpsa_offline_devices_ready()
6775 spin_unlock_irqrestore(&h->offline_device_lock, flags); in hpsa_offline_devices_ready()
6776 if (!hpsa_volume_offline(h, d->scsi3addr)) { in hpsa_offline_devices_ready()
6777 spin_lock_irqsave(&h->offline_device_lock, flags); in hpsa_offline_devices_ready()
6779 spin_unlock_irqrestore(&h->offline_device_lock, flags); in hpsa_offline_devices_ready()
6782 spin_lock_irqsave(&h->offline_device_lock, flags); in hpsa_offline_devices_ready()
6784 spin_unlock_irqrestore(&h->offline_device_lock, flags); in hpsa_offline_devices_ready()
6791 struct ctlr_info *h = container_of(to_delayed_work(work), in hpsa_rescan_ctlr_worker() local
6795 if (h->remove_in_progress) in hpsa_rescan_ctlr_worker()
6798 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { in hpsa_rescan_ctlr_worker()
6799 scsi_host_get(h->scsi_host); in hpsa_rescan_ctlr_worker()
6800 hpsa_ack_ctlr_events(h); in hpsa_rescan_ctlr_worker()
6801 hpsa_scan_start(h->scsi_host); in hpsa_rescan_ctlr_worker()
6802 scsi_host_put(h->scsi_host); in hpsa_rescan_ctlr_worker()
6804 spin_lock_irqsave(&h->lock, flags); in hpsa_rescan_ctlr_worker()
6805 if (!h->remove_in_progress) in hpsa_rescan_ctlr_worker()
6806 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, in hpsa_rescan_ctlr_worker()
6807 h->heartbeat_sample_interval); in hpsa_rescan_ctlr_worker()
6808 spin_unlock_irqrestore(&h->lock, flags); in hpsa_rescan_ctlr_worker()
6814 struct ctlr_info *h = container_of(to_delayed_work(work), in hpsa_monitor_ctlr_worker() local
6817 detect_controller_lockup(h); in hpsa_monitor_ctlr_worker()
6818 if (lockup_detected(h)) in hpsa_monitor_ctlr_worker()
6821 spin_lock_irqsave(&h->lock, flags); in hpsa_monitor_ctlr_worker()
6822 if (!h->remove_in_progress) in hpsa_monitor_ctlr_worker()
6823 schedule_delayed_work(&h->monitor_ctlr_work, in hpsa_monitor_ctlr_worker()
6824 h->heartbeat_sample_interval); in hpsa_monitor_ctlr_worker()
6825 spin_unlock_irqrestore(&h->lock, flags); in hpsa_monitor_ctlr_worker()
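Both workers re-arm themselves under h->lock after testing remove_in_progress; hpsa_remove_one() takes the same lock, sets the flag, and only then calls cancel_delayed_work_sync(), so no fresh self-queue can slip through during teardown. The re-arm pattern in isolation:

        spin_lock_irqsave(&h->lock, flags);
        if (!h->remove_in_progress)             /* fenced off by remove path */
                schedule_delayed_work(&h->monitor_ctlr_work,
                                h->heartbeat_sample_interval);
        spin_unlock_irqrestore(&h->lock, flags);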
6828 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, in hpsa_create_controller_wq() argument
6833 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); in hpsa_create_controller_wq()
6835 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name); in hpsa_create_controller_wq()
6843 struct ctlr_info *h; in hpsa_init_one() local
6870 h = kzalloc(sizeof(*h), GFP_KERNEL); in hpsa_init_one()
6871 if (!h) in hpsa_init_one()
6874 h->pdev = pdev; in hpsa_init_one()
6875 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; in hpsa_init_one()
6876 INIT_LIST_HEAD(&h->offline_device_list); in hpsa_init_one()
6877 spin_lock_init(&h->lock); in hpsa_init_one()
6878 spin_lock_init(&h->offline_device_lock); in hpsa_init_one()
6879 spin_lock_init(&h->scan_lock); in hpsa_init_one()
6880 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); in hpsa_init_one()
6882 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); in hpsa_init_one()
6883 if (!h->rescan_ctlr_wq) { in hpsa_init_one()
6888 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); in hpsa_init_one()
6889 if (!h->resubmit_wq) { in hpsa_init_one()
6895 h->lockup_detected = alloc_percpu(u32); in hpsa_init_one()
6896 if (!h->lockup_detected) { in hpsa_init_one()
6900 set_lockup_detected_for_all_cpus(h, 0); in hpsa_init_one()
6902 rc = hpsa_pci_init(h); in hpsa_init_one()
6906 sprintf(h->devname, HPSA "%d", number_of_controllers); in hpsa_init_one()
6907 h->ctlr = number_of_controllers; in hpsa_init_one()
6925 h->access.set_intr_mask(h, HPSA_INTR_OFF); in hpsa_init_one()
6927 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) in hpsa_init_one()
6930 h->devname, pdev->device, in hpsa_init_one()
6931 h->intr[h->intr_mode], dac ? "" : " not"); in hpsa_init_one()
6932 rc = hpsa_allocate_cmd_pool(h); in hpsa_init_one()
6935 if (hpsa_allocate_sg_chain_blocks(h)) in hpsa_init_one()
6937 init_waitqueue_head(&h->scan_wait_queue); in hpsa_init_one()
6938 h->scan_finished = 1; /* no scan currently in progress */ in hpsa_init_one()
6940 pci_set_drvdata(pdev, h); in hpsa_init_one()
6941 h->ndevices = 0; in hpsa_init_one()
6942 h->hba_mode_enabled = 0; in hpsa_init_one()
6943 h->scsi_host = NULL; in hpsa_init_one()
6944 spin_lock_init(&h->devlock); in hpsa_init_one()
6945 hpsa_put_ctlr_into_performant_mode(h); in hpsa_init_one()
6960 spin_lock_irqsave(&h->lock, flags); in hpsa_init_one()
6961 h->access.set_intr_mask(h, HPSA_INTR_OFF); in hpsa_init_one()
6962 spin_unlock_irqrestore(&h->lock, flags); in hpsa_init_one()
6963 hpsa_free_irqs(h); in hpsa_init_one()
6964 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, in hpsa_init_one()
6967 dev_warn(&h->pdev->dev, in hpsa_init_one()
6972 rc = hpsa_kdump_soft_reset(h); in hpsa_init_one()
6977 dev_info(&h->pdev->dev, "Board READY.\n"); in hpsa_init_one()
6978 dev_info(&h->pdev->dev, in hpsa_init_one()
6980 h->access.set_intr_mask(h, HPSA_INTR_ON); in hpsa_init_one()
6982 h->access.set_intr_mask(h, HPSA_INTR_OFF); in hpsa_init_one()
6984 rc = controller_reset_failed(h->cfgtable); in hpsa_init_one()
6986 dev_info(&h->pdev->dev, in hpsa_init_one()
6993 hpsa_undo_allocations_after_kdump_soft_reset(h); in hpsa_init_one()
7003 h->acciopath_status = 1; in hpsa_init_one()
7007 h->access.set_intr_mask(h, HPSA_INTR_ON); in hpsa_init_one()
7009 hpsa_hba_inquiry(h); in hpsa_init_one()
7010 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ in hpsa_init_one()
7013 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; in hpsa_init_one()
7014 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); in hpsa_init_one()
7015 schedule_delayed_work(&h->monitor_ctlr_work, in hpsa_init_one()
7016 h->heartbeat_sample_interval); in hpsa_init_one()
7017 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); in hpsa_init_one()
7018 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, in hpsa_init_one()
7019 h->heartbeat_sample_interval); in hpsa_init_one()
7023 hpsa_free_sg_chain_blocks(h); in hpsa_init_one()
7024 hpsa_free_cmd_pool(h); in hpsa_init_one()
7026 hpsa_free_irqs(h); in hpsa_init_one()
7029 if (h->resubmit_wq) in hpsa_init_one()
7030 destroy_workqueue(h->resubmit_wq); in hpsa_init_one()
7031 if (h->rescan_ctlr_wq) in hpsa_init_one()
7032 destroy_workqueue(h->rescan_ctlr_wq); in hpsa_init_one()
7033 if (h->lockup_detected) in hpsa_init_one()
7034 free_percpu(h->lockup_detected); in hpsa_init_one()
7035 kfree(h); in hpsa_init_one()
7039 static void hpsa_flush_cache(struct ctlr_info *h) in hpsa_flush_cache() argument
7045 if (unlikely(lockup_detected(h))) in hpsa_flush_cache()
7051 c = cmd_alloc(h); in hpsa_flush_cache()
7053 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); in hpsa_flush_cache()
7056 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, in hpsa_flush_cache()
7060 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); in hpsa_flush_cache()
7063 dev_warn(&h->pdev->dev, in hpsa_flush_cache()
7065 cmd_free(h, c); in hpsa_flush_cache()
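The shutdown-time flush issues a CISS cache-flush with a 4-byte DMA buffer and checks the command status afterwards; the buffer allocation and status test contain no reference to h and are absent from this listing. Consolidated sketch, assuming kernel context:

        flush_buf = kzalloc(4, GFP_KERNEL);
        if (!flush_buf)
                return;
        c = cmd_alloc(h);
        if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
                        RAID_CTLR_LUNID, TYPE_CMD))
                goto out;
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
        if (c->err_info->CommandStatus != 0)
                dev_warn(&h->pdev->dev, "error flushing cache\n");
out:
        cmd_free(h, c);
        kfree(flush_buf);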
7072 struct ctlr_info *h; in hpsa_shutdown() local
7074 h = pci_get_drvdata(pdev); in hpsa_shutdown()
7079 hpsa_flush_cache(h); in hpsa_shutdown()
7080 h->access.set_intr_mask(h, HPSA_INTR_OFF); in hpsa_shutdown()
7081 hpsa_free_irqs_and_disable_msix(h); in hpsa_shutdown()
7084 static void hpsa_free_device_info(struct ctlr_info *h) in hpsa_free_device_info() argument
7088 for (i = 0; i < h->ndevices; i++) in hpsa_free_device_info()
7089 kfree(h->dev[i]); in hpsa_free_device_info()
7094 struct ctlr_info *h; in hpsa_remove_one() local
7101 h = pci_get_drvdata(pdev); in hpsa_remove_one()
7104 spin_lock_irqsave(&h->lock, flags); in hpsa_remove_one()
7105 h->remove_in_progress = 1; in hpsa_remove_one()
7106 spin_unlock_irqrestore(&h->lock, flags); in hpsa_remove_one()
7107 cancel_delayed_work_sync(&h->monitor_ctlr_work); in hpsa_remove_one()
7108 cancel_delayed_work_sync(&h->rescan_ctlr_work); in hpsa_remove_one()
7109 destroy_workqueue(h->rescan_ctlr_wq); in hpsa_remove_one()
7110 destroy_workqueue(h->resubmit_wq); in hpsa_remove_one()
7111 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ in hpsa_remove_one()
7113 iounmap(h->vaddr); in hpsa_remove_one()
7114 iounmap(h->transtable); in hpsa_remove_one()
7115 iounmap(h->cfgtable); in hpsa_remove_one()
7116 hpsa_free_device_info(h); in hpsa_remove_one()
7117 hpsa_free_sg_chain_blocks(h); in hpsa_remove_one()
7118 pci_free_consistent(h->pdev, in hpsa_remove_one()
7119 h->nr_cmds * sizeof(struct CommandList), in hpsa_remove_one()
7120 h->cmd_pool, h->cmd_pool_dhandle); in hpsa_remove_one()
7121 pci_free_consistent(h->pdev, in hpsa_remove_one()
7122 h->nr_cmds * sizeof(struct ErrorInfo), in hpsa_remove_one()
7123 h->errinfo_pool, h->errinfo_pool_dhandle); in hpsa_remove_one()
7124 hpsa_free_reply_queues(h); in hpsa_remove_one()
7125 kfree(h->cmd_pool_bits); in hpsa_remove_one()
7126 kfree(h->blockFetchTable); in hpsa_remove_one()
7127 kfree(h->ioaccel1_blockFetchTable); in hpsa_remove_one()
7128 kfree(h->ioaccel2_blockFetchTable); in hpsa_remove_one()
7129 kfree(h->hba_inquiry_data); in hpsa_remove_one()
7132 free_percpu(h->lockup_detected); in hpsa_remove_one()
7133 kfree(h); in hpsa_remove_one()
7192 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) in hpsa_enter_performant_mode() argument
7246 for (i = 0; i < h->nreply_queues; i++) in hpsa_enter_performant_mode()
7247 memset(h->reply_queue[i].head, 0, h->reply_queue_size); in hpsa_enter_performant_mode()
7251 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); in hpsa_enter_performant_mode()
7253 writel(bft[i], &h->transtable->BlockFetch[i]); in hpsa_enter_performant_mode()
7256 writel(h->max_commands, &h->transtable->RepQSize); in hpsa_enter_performant_mode()
7257 writel(h->nreply_queues, &h->transtable->RepQCount); in hpsa_enter_performant_mode()
7258 writel(0, &h->transtable->RepQCtrAddrLow32); in hpsa_enter_performant_mode()
7259 writel(0, &h->transtable->RepQCtrAddrHigh32); in hpsa_enter_performant_mode()
7261 for (i = 0; i < h->nreply_queues; i++) { in hpsa_enter_performant_mode()
7262 writel(0, &h->transtable->RepQAddr[i].upper); in hpsa_enter_performant_mode()
7263 writel(h->reply_queue[i].busaddr, in hpsa_enter_performant_mode()
7264 &h->transtable->RepQAddr[i].lower); in hpsa_enter_performant_mode()
7267 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); in hpsa_enter_performant_mode()
7268 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); in hpsa_enter_performant_mode()
7274 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); in hpsa_enter_performant_mode()
7275 writel(4, &h->cfgtable->HostWrite.CoalIntCount); in hpsa_enter_performant_mode()
7279 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); in hpsa_enter_performant_mode()
7280 writel(4, &h->cfgtable->HostWrite.CoalIntCount); in hpsa_enter_performant_mode()
7283 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); in hpsa_enter_performant_mode()
7284 if (hpsa_wait_for_mode_change_ack(h)) { in hpsa_enter_performant_mode()
7285 dev_err(&h->pdev->dev, in hpsa_enter_performant_mode()
7289 register_value = readl(&(h->cfgtable->TransportActive)); in hpsa_enter_performant_mode()
7291 dev_err(&h->pdev->dev, in hpsa_enter_performant_mode()
7296 h->access = access; in hpsa_enter_performant_mode()
7297 h->transMethod = transMethod; in hpsa_enter_performant_mode()
7305 for (i = 0; i < h->nreply_queues; i++) { in hpsa_enter_performant_mode()
7306 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); in hpsa_enter_performant_mode()
7307 h->reply_queue[i].current_entry = in hpsa_enter_performant_mode()
7308 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); in hpsa_enter_performant_mode()
7310 bft[7] = h->ioaccel_maxsg + 8; in hpsa_enter_performant_mode()
7311 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, in hpsa_enter_performant_mode()
7312 h->ioaccel1_blockFetchTable); in hpsa_enter_performant_mode()
7315 for (i = 0; i < h->nreply_queues; i++) in hpsa_enter_performant_mode()
7316 memset(h->reply_queue[i].head, in hpsa_enter_performant_mode()
7318 h->reply_queue_size); in hpsa_enter_performant_mode()
7323 for (i = 0; i < h->nr_cmds; i++) { in hpsa_enter_performant_mode()
7324 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; in hpsa_enter_performant_mode()
7327 cp->err_info = (u32) (h->errinfo_pool_dhandle + in hpsa_enter_performant_mode()
7338 cpu_to_le64(h->ioaccel_cmd_pool_dhandle + in hpsa_enter_performant_mode()
7346 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, in hpsa_enter_performant_mode()
7349 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; in hpsa_enter_performant_mode()
7350 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, in hpsa_enter_performant_mode()
7351 4, h->ioaccel2_blockFetchTable); in hpsa_enter_performant_mode()
7352 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); in hpsa_enter_performant_mode()
7355 h->ioaccel2_bft2_regs = in hpsa_enter_performant_mode()
7356 remap_pci_mem(pci_resource_start(h->pdev, in hpsa_enter_performant_mode()
7360 sizeof(*h->ioaccel2_bft2_regs)); in hpsa_enter_performant_mode()
7362 writel(bft2[i], &h->ioaccel2_bft2_regs[i]); in hpsa_enter_performant_mode()
7364 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); in hpsa_enter_performant_mode()
7365 if (hpsa_wait_for_mode_change_ack(h)) { in hpsa_enter_performant_mode()
7366 dev_err(&h->pdev->dev, in hpsa_enter_performant_mode()
7373 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) in hpsa_alloc_ioaccel_cmd_and_bft() argument
7375 h->ioaccel_maxsg = in hpsa_alloc_ioaccel_cmd_and_bft()
7376 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); in hpsa_alloc_ioaccel_cmd_and_bft()
7377 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) in hpsa_alloc_ioaccel_cmd_and_bft()
7378 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; in hpsa_alloc_ioaccel_cmd_and_bft()
7386 h->ioaccel_cmd_pool = in hpsa_alloc_ioaccel_cmd_and_bft()
7387 pci_alloc_consistent(h->pdev, in hpsa_alloc_ioaccel_cmd_and_bft()
7388 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), in hpsa_alloc_ioaccel_cmd_and_bft()
7389 &(h->ioaccel_cmd_pool_dhandle)); in hpsa_alloc_ioaccel_cmd_and_bft()
7391 h->ioaccel1_blockFetchTable = in hpsa_alloc_ioaccel_cmd_and_bft()
7392 kmalloc(((h->ioaccel_maxsg + 1) * in hpsa_alloc_ioaccel_cmd_and_bft()
7395 if ((h->ioaccel_cmd_pool == NULL) || in hpsa_alloc_ioaccel_cmd_and_bft()
7396 (h->ioaccel1_blockFetchTable == NULL)) in hpsa_alloc_ioaccel_cmd_and_bft()
7399 memset(h->ioaccel_cmd_pool, 0, in hpsa_alloc_ioaccel_cmd_and_bft()
7400 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); in hpsa_alloc_ioaccel_cmd_and_bft()
7404 if (h->ioaccel_cmd_pool) in hpsa_alloc_ioaccel_cmd_and_bft()
7405 pci_free_consistent(h->pdev, in hpsa_alloc_ioaccel_cmd_and_bft()
7406 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), in hpsa_alloc_ioaccel_cmd_and_bft()
7407 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); in hpsa_alloc_ioaccel_cmd_and_bft()
7408 kfree(h->ioaccel1_blockFetchTable); in hpsa_alloc_ioaccel_cmd_and_bft()
7412 static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) in ioaccel2_alloc_cmds_and_bft() argument
7416 h->ioaccel_maxsg = in ioaccel2_alloc_cmds_and_bft()
7417 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); in ioaccel2_alloc_cmds_and_bft()
7418 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) in ioaccel2_alloc_cmds_and_bft()
7419 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; in ioaccel2_alloc_cmds_and_bft()
7423 h->ioaccel2_cmd_pool = in ioaccel2_alloc_cmds_and_bft()
7424 pci_alloc_consistent(h->pdev, in ioaccel2_alloc_cmds_and_bft()
7425 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), in ioaccel2_alloc_cmds_and_bft()
7426 &(h->ioaccel2_cmd_pool_dhandle)); in ioaccel2_alloc_cmds_and_bft()
7428 h->ioaccel2_blockFetchTable = in ioaccel2_alloc_cmds_and_bft()
7429 kmalloc(((h->ioaccel_maxsg + 1) * in ioaccel2_alloc_cmds_and_bft()
7432 if ((h->ioaccel2_cmd_pool == NULL) || in ioaccel2_alloc_cmds_and_bft()
7433 (h->ioaccel2_blockFetchTable == NULL)) in ioaccel2_alloc_cmds_and_bft()
7436 memset(h->ioaccel2_cmd_pool, 0, in ioaccel2_alloc_cmds_and_bft()
7437 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); in ioaccel2_alloc_cmds_and_bft()
7441 if (h->ioaccel2_cmd_pool) in ioaccel2_alloc_cmds_and_bft()
7442 pci_free_consistent(h->pdev, in ioaccel2_alloc_cmds_and_bft()
7443 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), in ioaccel2_alloc_cmds_and_bft()
7444 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); in ioaccel2_alloc_cmds_and_bft()
7445 kfree(h->ioaccel2_blockFetchTable); in ioaccel2_alloc_cmds_and_bft()
7449 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) in hpsa_put_ctlr_into_performant_mode() argument
7459 trans_support = readl(&(h->cfgtable->TransportSupport)); in hpsa_put_ctlr_into_performant_mode()
7467 if (hpsa_alloc_ioaccel_cmd_and_bft(h)) in hpsa_put_ctlr_into_performant_mode()
7473 if (ioaccel2_alloc_cmds_and_bft(h)) in hpsa_put_ctlr_into_performant_mode()
7478 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1; in hpsa_put_ctlr_into_performant_mode()
7479 hpsa_get_max_perf_mode_cmds(h); in hpsa_put_ctlr_into_performant_mode()
7481 h->reply_queue_size = h->max_commands * sizeof(u64); in hpsa_put_ctlr_into_performant_mode()
7483 for (i = 0; i < h->nreply_queues; i++) { in hpsa_put_ctlr_into_performant_mode()
7484 h->reply_queue[i].head = pci_alloc_consistent(h->pdev, in hpsa_put_ctlr_into_performant_mode()
7485 h->reply_queue_size, in hpsa_put_ctlr_into_performant_mode()
7486 &(h->reply_queue[i].busaddr)); in hpsa_put_ctlr_into_performant_mode()
7487 if (!h->reply_queue[i].head) in hpsa_put_ctlr_into_performant_mode()
7489 h->reply_queue[i].size = h->max_commands; in hpsa_put_ctlr_into_performant_mode()
7490 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ in hpsa_put_ctlr_into_performant_mode()
7491 h->reply_queue[i].current_entry = 0; in hpsa_put_ctlr_into_performant_mode()
7495 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * in hpsa_put_ctlr_into_performant_mode()
7497 if (!h->blockFetchTable) in hpsa_put_ctlr_into_performant_mode()
7500 hpsa_enter_performant_mode(h, trans_support); in hpsa_put_ctlr_into_performant_mode()
7504 hpsa_free_reply_queues(h); in hpsa_put_ctlr_into_performant_mode()
7505 kfree(h->blockFetchTable); in hpsa_put_ctlr_into_performant_mode()
7513 static void hpsa_drain_accel_commands(struct ctlr_info *h) in hpsa_drain_accel_commands() argument
7521 for (i = 0; i < h->nr_cmds; i++) { in hpsa_drain_accel_commands()
7522 c = h->cmd_pool + i; in hpsa_drain_accel_commands()
7526 cmd_free(h, c); in hpsa_drain_accel_commands()
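hpsa_drain_accel_commands() busy-waits until no command in the pool is an outstanding ioaccel request. Condensed sketch of one polling pass; the refcount pairing (inc, inspect, cmd_free to drop the extra reference) is an assumption about this driver generation, inferred from the cmd_free() calls visible above:

        do {
                accel_cmds_out = 0;
                for (i = 0; i < h->nr_cmds; i++) {
                        c = h->cmd_pool + i;
                        refcount = atomic_inc_return(&c->refcount);
                        if (refcount > 1)       /* command is allocated */
                                accel_cmds_out += is_accelerated_cmd(c);
                        cmd_free(h, c);         /* drop our extra reference */
                }
                if (accel_cmds_out <= 0)
                        break;
                msleep(100);    /* still draining; poll again */
        } while (1);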