Lines matching references to 'h' (struct ctlr_info *) in the hpsa driver (drivers/scsi/hpsa.c).
Each entry gives the source line number, the matching line, and the enclosing function.
213 static int hpsa_add_sas_host(struct ctlr_info *h);
214 static void hpsa_delete_sas_host(struct ctlr_info *h);
219 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
237 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
238 static struct CommandList *cmd_alloc(struct ctlr_info *h);
239 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
240 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
242 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
245 static void hpsa_free_cmd_pool(struct ctlr_info *h);
249 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
261 static void hpsa_update_scsi_devices(struct ctlr_info *h);
262 static int check_for_unit_attention(struct ctlr_info *h,
264 static void check_ioctl_unit_attention(struct ctlr_info *h,
269 static void hpsa_free_performant_mode(struct ctlr_info *h);
270 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
271 static inline u32 next_command(struct ctlr_info *h, u8 q);
281 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
284 static void hpsa_drain_accel_commands(struct ctlr_info *h);
285 static void hpsa_flush_cache(struct ctlr_info *h);
286 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
290 static u32 lockup_detected(struct ctlr_info *h);
291 static int detect_controller_lockup(struct ctlr_info *h);
292 static void hpsa_disable_rld_caching(struct ctlr_info *h);
293 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
295 static int hpsa_luns_changed(struct ctlr_info *h);
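The declarations above sketch the driver's command lifecycle: a CommandList is taken from a pool (cmd_alloc/cmd_tagged_alloc), filled in (fill_cmd), submitted, and returned (cmd_free/cmd_tagged_free). A minimal userspace model of such a fixed-size pool follows; names and sizes are illustrative, not the driver's actual implementation.

#include <stdio.h>

#define NR_CMDS 16                      /* stand-in for h->nr_cmds */

struct command {                        /* stand-in for struct CommandList */
    int cmdindex;
    int in_use;
};

static struct command cmd_pool[NR_CMDS];

static struct command *cmd_alloc_model(void)
{
    for (int i = 0; i < NR_CMDS; i++) {
        if (!cmd_pool[i].in_use) {      /* take the first free slot */
            cmd_pool[i].in_use = 1;
            cmd_pool[i].cmdindex = i;
            return &cmd_pool[i];
        }
    }
    return NULL;                        /* pool exhausted */
}

static void cmd_free_model(struct command *c)
{
    c->in_use = 0;                      /* slot becomes reusable */
}

int main(void)
{
    struct command *c = cmd_alloc_model();
    if (c) {
        printf("allocated cmdindex %d\n", c->cmdindex);
        cmd_free_model(c);
    }
    return 0;
}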
341 static int check_for_unit_attention(struct ctlr_info *h, in check_for_unit_attention() argument
359 dev_warn(&h->pdev->dev, in check_for_unit_attention()
361 h->devname); in check_for_unit_attention()
364 dev_warn(&h->pdev->dev, in check_for_unit_attention()
365 "%s: LUN failure detected\n", h->devname); in check_for_unit_attention()
368 dev_warn(&h->pdev->dev, in check_for_unit_attention()
369 "%s: report LUN data changed\n", h->devname); in check_for_unit_attention()
376 dev_warn(&h->pdev->dev, in check_for_unit_attention()
378 h->devname); in check_for_unit_attention()
381 dev_warn(&h->pdev->dev, in check_for_unit_attention()
383 h->devname); in check_for_unit_attention()
386 dev_warn(&h->pdev->dev, in check_for_unit_attention()
388 h->devname); in check_for_unit_attention()
394 static int check_for_busy(struct ctlr_info *h, struct CommandList *c) in check_for_busy() argument
400 dev_warn(&h->pdev->dev, HPSA "device busy"); in check_for_busy()
404 static u32 lockup_detected(struct ctlr_info *h);
409 struct ctlr_info *h; in host_show_lockup_detected() local
412 h = shost_to_hba(shost); in host_show_lockup_detected()
413 ld = lockup_detected(h); in host_show_lockup_detected()
423 struct ctlr_info *h; in host_store_hp_ssd_smart_path_status() local
434 h = shost_to_hba(shost); in host_store_hp_ssd_smart_path_status()
435 h->acciopath_status = !!status; in host_store_hp_ssd_smart_path_status()
436 dev_warn(&h->pdev->dev, in host_store_hp_ssd_smart_path_status()
438 h->acciopath_status ? "enabled" : "disabled"); in host_store_hp_ssd_smart_path_status()
447 struct ctlr_info *h; in host_store_raid_offload_debug() local
460 h = shost_to_hba(shost); in host_store_raid_offload_debug()
461 h->raid_offload_debug = debug_level; in host_store_raid_offload_debug()
462 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n", in host_store_raid_offload_debug()
463 h->raid_offload_debug); in host_store_raid_offload_debug()
471 struct ctlr_info *h; in host_store_rescan() local
473 h = shost_to_hba(shost); in host_store_rescan()
474 hpsa_scan_start(h->scsi_host); in host_store_rescan()
481 struct ctlr_info *h; in host_show_firmware_revision() local
485 h = shost_to_hba(shost); in host_show_firmware_revision()
486 if (!h->hba_inquiry_data) in host_show_firmware_revision()
488 fwrev = &h->hba_inquiry_data[32]; in host_show_firmware_revision()
497 struct ctlr_info *h = shost_to_hba(shost); in host_show_commands_outstanding() local
500 atomic_read(&h->commands_outstanding)); in host_show_commands_outstanding()
506 struct ctlr_info *h; in host_show_transport_mode() local
509 h = shost_to_hba(shost); in host_show_transport_mode()
511 h->transMethod & CFGTBL_Trans_Performant ? in host_show_transport_mode()
518 struct ctlr_info *h; in host_show_hp_ssd_smart_path_status() local
521 h = shost_to_hba(shost); in host_show_hp_ssd_smart_path_status()
523 (h->acciopath_status == 1) ? "enabled" : "disabled"); in host_show_hp_ssd_smart_path_status()
614 struct ctlr_info *h; in host_show_resettable() local
617 h = shost_to_hba(shost); in host_show_resettable()
618 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); in host_show_resettable()
649 struct ctlr_info *h; in raid_level_show() local
655 h = sdev_to_hba(sdev); in raid_level_show()
656 spin_lock_irqsave(&h->lock, flags); in raid_level_show()
659 spin_unlock_irqrestore(&h->lock, flags); in raid_level_show()
665 spin_unlock_irqrestore(&h->lock, flags); in raid_level_show()
671 spin_unlock_irqrestore(&h->lock, flags); in raid_level_show()
681 struct ctlr_info *h; in lunid_show() local
688 h = sdev_to_hba(sdev); in lunid_show()
689 spin_lock_irqsave(&h->lock, flags); in lunid_show()
692 spin_unlock_irqrestore(&h->lock, flags); in lunid_show()
696 spin_unlock_irqrestore(&h->lock, flags); in lunid_show()
705 struct ctlr_info *h; in unique_id_show() local
712 h = sdev_to_hba(sdev); in unique_id_show()
713 spin_lock_irqsave(&h->lock, flags); in unique_id_show()
716 spin_unlock_irqrestore(&h->lock, flags); in unique_id_show()
720 spin_unlock_irqrestore(&h->lock, flags); in unique_id_show()
733 struct ctlr_info *h; in host_show_hp_ssd_smart_path_enabled() local
740 h = sdev_to_hba(sdev); in host_show_hp_ssd_smart_path_enabled()
741 spin_lock_irqsave(&h->lock, flags); in host_show_hp_ssd_smart_path_enabled()
744 spin_unlock_irqrestore(&h->lock, flags); in host_show_hp_ssd_smart_path_enabled()
748 spin_unlock_irqrestore(&h->lock, flags); in host_show_hp_ssd_smart_path_enabled()
757 struct ctlr_info *h; in path_info_show() local
770 h = sdev_to_hba(sdev); in path_info_show()
771 spin_lock_irqsave(&h->devlock, flags); in path_info_show()
774 spin_unlock_irqrestore(&h->devlock, flags); in path_info_show()
791 h->scsi_host->host_no, in path_info_show()
837 spin_unlock_irqrestore(&h->devlock, flags); in path_info_show()
913 static inline u32 next_command(struct ctlr_info *h, u8 q) in next_command() argument
916 struct reply_queue_buffer *rq = &h->reply_queue[q]; in next_command()
918 if (h->transMethod & CFGTBL_Trans_io_accel1) in next_command()
919 return h->access.command_completed(h, q); in next_command()
921 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) in next_command()
922 return h->access.command_completed(h, q); in next_command()
927 atomic_dec(&h->commands_outstanding); in next_command()
932 if (rq->current_entry == h->max_commands) { in next_command()
971 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, in set_performant_mode() argument
974 if (likely(h->transMethod & CFGTBL_Trans_Performant)) { in set_performant_mode()
975 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); in set_performant_mode()
976 if (unlikely(!h->msix_vector)) in set_performant_mode()
980 raw_smp_processor_id() % h->nreply_queues; in set_performant_mode()
982 c->Header.ReplyQueue = reply_queue % h->nreply_queues; in set_performant_mode()
986 static void set_ioaccel1_performant_mode(struct ctlr_info *h, in set_ioaccel1_performant_mode() argument
990 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; in set_ioaccel1_performant_mode()
997 cp->ReplyQueue = smp_processor_id() % h->nreply_queues; in set_ioaccel1_performant_mode()
999 cp->ReplyQueue = reply_queue % h->nreply_queues; in set_ioaccel1_performant_mode()
1006 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) | in set_ioaccel1_performant_mode()
1010 static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h, in set_ioaccel2_tmf_performant_mode() argument
1015 &h->ioaccel2_cmd_pool[c->cmdindex]; in set_ioaccel2_tmf_performant_mode()
1021 cp->reply_queue = smp_processor_id() % h->nreply_queues; in set_ioaccel2_tmf_performant_mode()
1023 cp->reply_queue = reply_queue % h->nreply_queues; in set_ioaccel2_tmf_performant_mode()
1029 c->busaddr |= h->ioaccel2_blockFetchTable[0]; in set_ioaccel2_tmf_performant_mode()
1032 static void set_ioaccel2_performant_mode(struct ctlr_info *h, in set_ioaccel2_performant_mode() argument
1036 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; in set_ioaccel2_performant_mode()
1043 cp->reply_queue = smp_processor_id() % h->nreply_queues; in set_ioaccel2_performant_mode()
1045 cp->reply_queue = reply_queue % h->nreply_queues; in set_ioaccel2_performant_mode()
1052 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); in set_ioaccel2_performant_mode()
1067 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h, in dial_down_lockup_detection_during_fw_flash() argument
1072 atomic_inc(&h->firmware_flash_in_progress); in dial_down_lockup_detection_during_fw_flash()
1073 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH; in dial_down_lockup_detection_during_fw_flash()
1076 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h, in dial_up_lockup_detection_on_fw_flash_complete() argument
1080 atomic_dec_and_test(&h->firmware_flash_in_progress)) in dial_up_lockup_detection_on_fw_flash_complete()
1081 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; in dial_up_lockup_detection_on_fw_flash_complete()
1084 static void __enqueue_cmd_and_start_io(struct ctlr_info *h, in __enqueue_cmd_and_start_io() argument
1087 dial_down_lockup_detection_during_fw_flash(h, c); in __enqueue_cmd_and_start_io()
1088 atomic_inc(&h->commands_outstanding); in __enqueue_cmd_and_start_io()
1091 set_ioaccel1_performant_mode(h, c, reply_queue); in __enqueue_cmd_and_start_io()
1092 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); in __enqueue_cmd_and_start_io()
1095 set_ioaccel2_performant_mode(h, c, reply_queue); in __enqueue_cmd_and_start_io()
1096 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); in __enqueue_cmd_and_start_io()
1099 set_ioaccel2_tmf_performant_mode(h, c, reply_queue); in __enqueue_cmd_and_start_io()
1100 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); in __enqueue_cmd_and_start_io()
1103 set_performant_mode(h, c, reply_queue); in __enqueue_cmd_and_start_io()
1104 h->access.submit_command(h, c); in __enqueue_cmd_and_start_io()
1108 static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) in enqueue_cmd_and_start_io() argument
1113 __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); in enqueue_cmd_and_start_io()
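__enqueue_cmd_and_start_io() dispatches on the command type: ioaccel1 commands are posted to SA5_REQUEST_PORT_OFFSET, ioaccel2 and ioaccel2 TMF commands to IOACCEL2_INBOUND_POSTQ_32, and everything else goes through the legacy submit_command() hook. A userspace model of that dispatch, with the enum names illustrative and the MMIO write stubbed out:

#include <stdio.h>

enum cmd_type { CMD_IOACCEL1, CMD_IOACCEL2, IOACCEL2_TMF, CMD_CISS };

static void post(const char *doorbell, unsigned int busaddr)
{
    printf("writel(0x%08x, vaddr + %s)\n", busaddr, doorbell); /* stand-in for writel() */
}

static void enqueue_model(enum cmd_type type, unsigned int busaddr)
{
    switch (type) {
    case CMD_IOACCEL1:
        post("SA5_REQUEST_PORT_OFFSET", busaddr);    /* ioaccel1 doorbell */
        break;
    case CMD_IOACCEL2:
    case IOACCEL2_TMF:
        post("IOACCEL2_INBOUND_POSTQ_32", busaddr);  /* ioaccel2 + TMF share one */
        break;
    default:
        post("h->access.submit_command", busaddr);   /* legacy performant path */
    }
}

int main(void)
{
    enqueue_model(CMD_IOACCEL2, 0x1000);
    enqueue_model(CMD_CISS, 0x2000);
    return 0;
}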
1121 static inline int is_scsi_rev_5(struct ctlr_info *h) in is_scsi_rev_5() argument
1123 if (!h->hba_inquiry_data) in is_scsi_rev_5()
1125 if ((h->hba_inquiry_data[2] & 0x07) == 5) in is_scsi_rev_5()
1130 static int hpsa_find_target_lun(struct ctlr_info *h, in hpsa_find_target_lun() argument
1141 for (i = 0; i < h->ndevices; i++) { in hpsa_find_target_lun()
1142 if (h->dev[i]->bus == bus && h->dev[i]->target != -1) in hpsa_find_target_lun()
1143 __set_bit(h->dev[i]->target, lun_taken); in hpsa_find_target_lun()
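hpsa_find_target_lun() marks every target already assigned on the bus in a bitmap and then (in code not captured by this listing) takes the first clear bit as the new target number. A minimal model with illustrative sizes:

#include <stdio.h>

#define MAX_TARGETS 16                  /* illustrative; the driver sizes this to its device limit */

int main(void)
{
    unsigned int lun_taken = 0;
    int assigned[] = { 0, 1, 3 };       /* targets already in use on this bus */

    for (int i = 0; i < 3; i++)
        lun_taken |= 1u << assigned[i]; /* __set_bit(target, lun_taken) */

    for (int t = 0; t < MAX_TARGETS; t++) {
        if (!(lun_taken & (1u << t))) {
            printf("first free target: %d\n", t); /* prints 2 */
            break;
        }
    }
    return 0;
}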
1156 static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h, in hpsa_show_dev_msg() argument
1162 if (h == NULL || h->pdev == NULL || h->scsi_host == NULL) in hpsa_show_dev_msg()
1197 dev_printk(level, &h->pdev->dev, in hpsa_show_dev_msg()
1199 h->scsi_host->host_no, dev->bus, dev->target, dev->lun, in hpsa_show_dev_msg()
1211 static int hpsa_scsi_add_entry(struct ctlr_info *h, in hpsa_scsi_add_entry() argument
1216 int n = h->ndevices; in hpsa_scsi_add_entry()
1222 dev_err(&h->pdev->dev, "too many devices, some will be " in hpsa_scsi_add_entry()
1238 if (hpsa_find_target_lun(h, device->scsi3addr, in hpsa_scsi_add_entry()
1254 sd = h->dev[i]; in hpsa_scsi_add_entry()
1267 dev_warn(&h->pdev->dev, "physical device with no LUN=0," in hpsa_scsi_add_entry()
1275 h->dev[n] = device; in hpsa_scsi_add_entry()
1276 h->ndevices++; in hpsa_scsi_add_entry()
1279 hpsa_show_dev_msg(KERN_INFO, h, device, in hpsa_scsi_add_entry()
1287 static void hpsa_scsi_update_entry(struct ctlr_info *h, in hpsa_scsi_update_entry() argument
1295 h->dev[entry]->raid_level = new_entry->raid_level; in hpsa_scsi_update_entry()
1307 h->dev[entry]->raid_map = new_entry->raid_map; in hpsa_scsi_update_entry()
1308 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; in hpsa_scsi_update_entry()
1311 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; in hpsa_scsi_update_entry()
1314 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled; in hpsa_scsi_update_entry()
1315 h->dev[entry]->offload_config = new_entry->offload_config; in hpsa_scsi_update_entry()
1316 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; in hpsa_scsi_update_entry()
1317 h->dev[entry]->queue_depth = new_entry->queue_depth; in hpsa_scsi_update_entry()
1324 h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled; in hpsa_scsi_update_entry()
1326 h->dev[entry]->offload_enabled = 0; in hpsa_scsi_update_entry()
1328 offload_enabled = h->dev[entry]->offload_enabled; in hpsa_scsi_update_entry()
1329 h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled; in hpsa_scsi_update_entry()
1330 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated"); in hpsa_scsi_update_entry()
1331 h->dev[entry]->offload_enabled = offload_enabled; in hpsa_scsi_update_entry()
1335 static void hpsa_scsi_replace_entry(struct ctlr_info *h, in hpsa_scsi_replace_entry() argument
1342 removed[*nremoved] = h->dev[entry]; in hpsa_scsi_replace_entry()
1350 new_entry->target = h->dev[entry]->target; in hpsa_scsi_replace_entry()
1351 new_entry->lun = h->dev[entry]->lun; in hpsa_scsi_replace_entry()
1354 h->dev[entry] = new_entry; in hpsa_scsi_replace_entry()
1357 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced"); in hpsa_scsi_replace_entry()
1363 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry, in hpsa_scsi_remove_entry() argument
1372 sd = h->dev[entry]; in hpsa_scsi_remove_entry()
1373 removed[*nremoved] = h->dev[entry]; in hpsa_scsi_remove_entry()
1376 for (i = entry; i < h->ndevices-1; i++) in hpsa_scsi_remove_entry()
1377 h->dev[i] = h->dev[i+1]; in hpsa_scsi_remove_entry()
1378 h->ndevices--; in hpsa_scsi_remove_entry()
1379 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed"); in hpsa_scsi_remove_entry()
1392 static void fixup_botched_add(struct ctlr_info *h, in fixup_botched_add() argument
1401 spin_lock_irqsave(&h->lock, flags); in fixup_botched_add()
1402 for (i = 0; i < h->ndevices; i++) { in fixup_botched_add()
1403 if (h->dev[i] == added) { in fixup_botched_add()
1404 for (j = i; j < h->ndevices-1; j++) in fixup_botched_add()
1405 h->dev[j] = h->dev[j+1]; in fixup_botched_add()
1406 h->ndevices--; in fixup_botched_add()
1410 spin_unlock_irqrestore(&h->lock, flags); in fixup_botched_add()
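hpsa_scsi_remove_entry() and fixup_botched_add() both delete a device by shifting the tail of h->dev[] left one slot, keeping the array dense. The idiom in isolation:

#include <stdio.h>

static int remove_entry(int *dev, int ndevices, int entry)
{
    for (int i = entry; i < ndevices - 1; i++)
        dev[i] = dev[i + 1];            /* shift the tail left by one */
    return ndevices - 1;                /* caller stores the new count */
}

int main(void)
{
    int dev[] = { 10, 11, 12, 13 };
    int n = remove_entry(dev, 4, 1);    /* drop the entry at index 1 */
    for (int i = 0; i < n; i++)
        printf("%d ", dev[i]);          /* prints: 10 12 13 */
    printf("\n");
    return 0;
}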
1498 static void hpsa_monitor_offline_device(struct ctlr_info *h, in hpsa_monitor_offline_device() argument
1505 spin_lock_irqsave(&h->offline_device_lock, flags); in hpsa_monitor_offline_device()
1506 list_for_each_entry(device, &h->offline_device_list, offline_list) { in hpsa_monitor_offline_device()
1509 spin_unlock_irqrestore(&h->offline_device_lock, flags); in hpsa_monitor_offline_device()
1513 spin_unlock_irqrestore(&h->offline_device_lock, flags); in hpsa_monitor_offline_device()
1518 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__); in hpsa_monitor_offline_device()
1522 spin_lock_irqsave(&h->offline_device_lock, flags); in hpsa_monitor_offline_device()
1523 list_add_tail(&device->offline_list, &h->offline_device_list); in hpsa_monitor_offline_device()
1524 spin_unlock_irqrestore(&h->offline_device_lock, flags); in hpsa_monitor_offline_device()
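hpsa_monitor_offline_device() checks for the address in the offline list under the lock and only adds a new node if it is not already there. A sketch of the same check-then-insert pattern, with a pthread mutex and a singly linked list standing in for the kernel primitives (the driver appends with list_add_tail(); this model prepends for brevity):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct offline_dev {
    unsigned char scsi3addr[8];
    struct offline_dev *next;
};

static struct offline_dev *offline_list;
static pthread_mutex_t offline_lock = PTHREAD_MUTEX_INITIALIZER;

static int monitor_offline_device(const unsigned char *scsi3addr)
{
    struct offline_dev *d;

    pthread_mutex_lock(&offline_lock);
    for (d = offline_list; d; d = d->next) {
        if (!memcmp(d->scsi3addr, scsi3addr, 8)) {
            pthread_mutex_unlock(&offline_lock);
            return 0;                   /* already being monitored */
        }
    }
    pthread_mutex_unlock(&offline_lock);

    d = malloc(sizeof(*d));             /* allocate outside the lock */
    if (!d)
        return -1;
    memcpy(d->scsi3addr, scsi3addr, 8);

    pthread_mutex_lock(&offline_lock);
    d->next = offline_list;             /* prepend; driver uses list_add_tail() */
    offline_list = d;
    pthread_mutex_unlock(&offline_lock);
    return 0;
}

int main(void)
{
    return monitor_offline_device((const unsigned char *)"8 bytes!");
}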
1528 static void hpsa_show_volume_status(struct ctlr_info *h, in hpsa_show_volume_status() argument
1532 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1534 h->scsi_host->host_no, in hpsa_show_volume_status()
1540 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1542 h->scsi_host->host_no, in hpsa_show_volume_status()
1546 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1548 h->scsi_host->host_no, in hpsa_show_volume_status()
1552 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1554 h->scsi_host->host_no, in hpsa_show_volume_status()
1558 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1560 h->scsi_host->host_no, in hpsa_show_volume_status()
1564 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1566 h->scsi_host->host_no, in hpsa_show_volume_status()
1570 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1572 h->scsi_host->host_no, in hpsa_show_volume_status()
1576 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1578 h->scsi_host->host_no, in hpsa_show_volume_status()
1582 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1584 h->scsi_host->host_no, in hpsa_show_volume_status()
1588 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1590 h->scsi_host->host_no, in hpsa_show_volume_status()
1594 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1596 h->scsi_host->host_no, in hpsa_show_volume_status()
1600 dev_info(&h->pdev->dev, in hpsa_show_volume_status()
1602 h->scsi_host->host_no, in hpsa_show_volume_status()
1612 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, in hpsa_figure_phys_disk_ptrs() argument
1650 qdepth = min(h->nr_cmds, qdepth + in hpsa_figure_phys_disk_ptrs()
1675 logical_drive->queue_depth = h->nr_cmds; in hpsa_figure_phys_disk_ptrs()
1678 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, in hpsa_update_log_drive_phys_drive_ptrs() argument
1700 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); in hpsa_update_log_drive_phys_drive_ptrs()
1704 static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) in hpsa_add_device() argument
1708 if (!h->scsi_host) in hpsa_add_device()
1712 rc = scsi_add_device(h->scsi_host, device->bus, in hpsa_add_device()
1715 rc = hpsa_add_sas_device(h->sas_host, device); in hpsa_add_device()
1720 static void hpsa_remove_device(struct ctlr_info *h, in hpsa_remove_device() argument
1725 if (!h->scsi_host) in hpsa_remove_device()
1729 sdev = scsi_device_lookup(h->scsi_host, device->bus, in hpsa_remove_device()
1740 hpsa_show_dev_msg(KERN_WARNING, h, device, in hpsa_remove_device()
1747 static void adjust_hpsa_scsi_table(struct ctlr_info *h, in adjust_hpsa_scsi_table() argument
1764 if (h->reset_in_progress) { in adjust_hpsa_scsi_table()
1765 h->drv_req_rescan = 1; in adjust_hpsa_scsi_table()
1773 dev_warn(&h->pdev->dev, "out of memory in " in adjust_hpsa_scsi_table()
1778 spin_lock_irqsave(&h->devlock, flags); in adjust_hpsa_scsi_table()
1790 while (i < h->ndevices) { in adjust_hpsa_scsi_table()
1791 csd = h->dev[i]; in adjust_hpsa_scsi_table()
1795 hpsa_scsi_remove_entry(h, i, removed, &nremoved); in adjust_hpsa_scsi_table()
1799 hpsa_scsi_replace_entry(h, i, sd[entry], in adjust_hpsa_scsi_table()
1806 hpsa_scsi_update_entry(h, i, sd[entry]); in adjust_hpsa_scsi_table()
1825 hpsa_show_volume_status(h, sd[i]); in adjust_hpsa_scsi_table()
1826 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline"); in adjust_hpsa_scsi_table()
1830 device_change = hpsa_scsi_find_entry(sd[i], h->dev, in adjust_hpsa_scsi_table()
1831 h->ndevices, &entry); in adjust_hpsa_scsi_table()
1834 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0) in adjust_hpsa_scsi_table()
1840 dev_warn(&h->pdev->dev, in adjust_hpsa_scsi_table()
1845 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices); in adjust_hpsa_scsi_table()
1850 for (i = 0; i < h->ndevices; i++) { in adjust_hpsa_scsi_table()
1851 if (h->dev[i] == NULL) in adjust_hpsa_scsi_table()
1853 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled; in adjust_hpsa_scsi_table()
1856 spin_unlock_irqrestore(&h->devlock, flags); in adjust_hpsa_scsi_table()
1866 hpsa_monitor_offline_device(h, sd[i]->scsi3addr); in adjust_hpsa_scsi_table()
1881 hpsa_remove_device(h, removed[i]); in adjust_hpsa_scsi_table()
1894 rc = hpsa_add_device(h, added[i]); in adjust_hpsa_scsi_table()
1897 dev_warn(&h->pdev->dev, in adjust_hpsa_scsi_table()
1902 fixup_botched_add(h, added[i]); in adjust_hpsa_scsi_table()
1903 h->drv_req_rescan = 1; in adjust_hpsa_scsi_table()
1915 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, in lookup_hpsa_scsi_dev() argument
1921 for (i = 0; i < h->ndevices; i++) { in lookup_hpsa_scsi_dev()
1922 sd = h->dev[i]; in lookup_hpsa_scsi_dev()
1933 struct ctlr_info *h; in hpsa_slave_alloc() local
1935 h = sdev_to_hba(sdev); in hpsa_slave_alloc()
1936 spin_lock_irqsave(&h->devlock, flags); in hpsa_slave_alloc()
1943 sd = hpsa_find_device_by_sas_rphy(h, rphy); in hpsa_slave_alloc()
1949 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), in hpsa_slave_alloc()
1957 spin_unlock_irqrestore(&h->devlock, flags); in hpsa_slave_alloc()
1986 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) in hpsa_free_ioaccel2_sg_chain_blocks() argument
1990 if (!h->ioaccel2_cmd_sg_list) in hpsa_free_ioaccel2_sg_chain_blocks()
1992 for (i = 0; i < h->nr_cmds; i++) { in hpsa_free_ioaccel2_sg_chain_blocks()
1993 kfree(h->ioaccel2_cmd_sg_list[i]); in hpsa_free_ioaccel2_sg_chain_blocks()
1994 h->ioaccel2_cmd_sg_list[i] = NULL; in hpsa_free_ioaccel2_sg_chain_blocks()
1996 kfree(h->ioaccel2_cmd_sg_list); in hpsa_free_ioaccel2_sg_chain_blocks()
1997 h->ioaccel2_cmd_sg_list = NULL; in hpsa_free_ioaccel2_sg_chain_blocks()
2000 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h) in hpsa_allocate_ioaccel2_sg_chain_blocks() argument
2004 if (h->chainsize <= 0) in hpsa_allocate_ioaccel2_sg_chain_blocks()
2007 h->ioaccel2_cmd_sg_list = in hpsa_allocate_ioaccel2_sg_chain_blocks()
2008 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds, in hpsa_allocate_ioaccel2_sg_chain_blocks()
2010 if (!h->ioaccel2_cmd_sg_list) in hpsa_allocate_ioaccel2_sg_chain_blocks()
2012 for (i = 0; i < h->nr_cmds; i++) { in hpsa_allocate_ioaccel2_sg_chain_blocks()
2013 h->ioaccel2_cmd_sg_list[i] = in hpsa_allocate_ioaccel2_sg_chain_blocks()
2014 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) * in hpsa_allocate_ioaccel2_sg_chain_blocks()
2015 h->maxsgentries, GFP_KERNEL); in hpsa_allocate_ioaccel2_sg_chain_blocks()
2016 if (!h->ioaccel2_cmd_sg_list[i]) in hpsa_allocate_ioaccel2_sg_chain_blocks()
2022 hpsa_free_ioaccel2_sg_chain_blocks(h); in hpsa_allocate_ioaccel2_sg_chain_blocks()
2026 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) in hpsa_free_sg_chain_blocks() argument
2030 if (!h->cmd_sg_list) in hpsa_free_sg_chain_blocks()
2032 for (i = 0; i < h->nr_cmds; i++) { in hpsa_free_sg_chain_blocks()
2033 kfree(h->cmd_sg_list[i]); in hpsa_free_sg_chain_blocks()
2034 h->cmd_sg_list[i] = NULL; in hpsa_free_sg_chain_blocks()
2036 kfree(h->cmd_sg_list); in hpsa_free_sg_chain_blocks()
2037 h->cmd_sg_list = NULL; in hpsa_free_sg_chain_blocks()
2040 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) in hpsa_alloc_sg_chain_blocks() argument
2044 if (h->chainsize <= 0) in hpsa_alloc_sg_chain_blocks()
2047 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, in hpsa_alloc_sg_chain_blocks()
2049 if (!h->cmd_sg_list) { in hpsa_alloc_sg_chain_blocks()
2050 dev_err(&h->pdev->dev, "Failed to allocate SG list\n"); in hpsa_alloc_sg_chain_blocks()
2053 for (i = 0; i < h->nr_cmds; i++) { in hpsa_alloc_sg_chain_blocks()
2054 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * in hpsa_alloc_sg_chain_blocks()
2055 h->chainsize, GFP_KERNEL); in hpsa_alloc_sg_chain_blocks()
2056 if (!h->cmd_sg_list[i]) { in hpsa_alloc_sg_chain_blocks()
2057 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n"); in hpsa_alloc_sg_chain_blocks()
2064 hpsa_free_sg_chain_blocks(h); in hpsa_alloc_sg_chain_blocks()
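hpsa_alloc_sg_chain_blocks() (and its ioaccel2 twin above) performs a two-level allocation: an array of per-command pointers, then one chain block per command, reusing the matching free routine to unwind on partial failure. A standalone model with illustrative sizes:

#include <stdlib.h>

#define NR_CMDS   8                     /* stand-in for h->nr_cmds */
#define CHAINSIZE 32                    /* stand-in for h->chainsize */

static int **chain_list;

static void free_chain_blocks(void)
{
    if (!chain_list)
        return;
    for (int i = 0; i < NR_CMDS; i++) {
        free(chain_list[i]);
        chain_list[i] = NULL;
    }
    free(chain_list);
    chain_list = NULL;
}

static int alloc_chain_blocks(void)
{
    chain_list = calloc(NR_CMDS, sizeof(*chain_list));
    if (!chain_list)
        return -1;
    for (int i = 0; i < NR_CMDS; i++) {
        chain_list[i] = malloc(CHAINSIZE * sizeof(**chain_list));
        if (!chain_list[i]) {
            free_chain_blocks();        /* unwind partial success */
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    if (alloc_chain_blocks() == 0)
        free_chain_blocks();
    return 0;
}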
2068 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h, in hpsa_map_ioaccel2_sg_chain_block() argument
2075 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; in hpsa_map_ioaccel2_sg_chain_block()
2077 temp64 = pci_map_single(h->pdev, chain_block, chain_size, in hpsa_map_ioaccel2_sg_chain_block()
2079 if (dma_mapping_error(&h->pdev->dev, temp64)) { in hpsa_map_ioaccel2_sg_chain_block()
2088 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h, in hpsa_unmap_ioaccel2_sg_chain_block() argument
2098 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE); in hpsa_unmap_ioaccel2_sg_chain_block()
2101 static int hpsa_map_sg_chain_block(struct ctlr_info *h, in hpsa_map_sg_chain_block() argument
2108 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; in hpsa_map_sg_chain_block()
2109 chain_block = h->cmd_sg_list[c->cmdindex]; in hpsa_map_sg_chain_block()
2112 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); in hpsa_map_sg_chain_block()
2114 temp64 = pci_map_single(h->pdev, chain_block, chain_len, in hpsa_map_sg_chain_block()
2116 if (dma_mapping_error(&h->pdev->dev, temp64)) { in hpsa_map_sg_chain_block()
2125 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, in hpsa_unmap_sg_chain_block() argument
2130 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries) in hpsa_unmap_sg_chain_block()
2133 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; in hpsa_unmap_sg_chain_block()
2134 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr), in hpsa_unmap_sg_chain_block()
2143 static int handle_ioaccel_mode2_error(struct ctlr_info *h, in handle_ioaccel_mode2_error() argument
2234 static void hpsa_cmd_resolve_events(struct ctlr_info *h, in hpsa_cmd_resolve_events() argument
2269 spin_lock_irqsave(&h->lock, flags); in hpsa_cmd_resolve_events()
2274 spin_unlock_irqrestore(&h->lock, flags); in hpsa_cmd_resolve_events()
2278 wake_up_all(&h->event_sync_wait_queue); in hpsa_cmd_resolve_events()
2281 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, in hpsa_cmd_resolve_and_free() argument
2284 hpsa_cmd_resolve_events(h, c); in hpsa_cmd_resolve_and_free()
2285 cmd_tagged_free(h, c); in hpsa_cmd_resolve_and_free()
2288 static void hpsa_cmd_free_and_done(struct ctlr_info *h, in hpsa_cmd_free_and_done() argument
2291 hpsa_cmd_resolve_and_free(h, c); in hpsa_cmd_free_and_done()
2295 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) in hpsa_retry_cmd() argument
2298 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); in hpsa_retry_cmd()
2306 static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c, in hpsa_cmd_abort_and_free() argument
2310 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", in hpsa_cmd_abort_and_free()
2312 hpsa_cmd_resolve_and_free(h, c); in hpsa_cmd_abort_and_free()
2315 static void process_ioaccel2_completion(struct ctlr_info *h, in process_ioaccel2_completion() argument
2319 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; in process_ioaccel2_completion()
2324 return hpsa_cmd_free_and_done(h, c, cmd); in process_ioaccel2_completion()
2338 return hpsa_retry_cmd(h, c); in process_ioaccel2_completion()
2341 if (handle_ioaccel_mode2_error(h, c, cmd, c2)) in process_ioaccel2_completion()
2342 return hpsa_retry_cmd(h, c); in process_ioaccel2_completion()
2344 return hpsa_cmd_free_and_done(h, c, cmd); in process_ioaccel2_completion()
2348 static int hpsa_evaluate_tmf_status(struct ctlr_info *h, in hpsa_evaluate_tmf_status() argument
2368 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n", in hpsa_evaluate_tmf_status()
2378 struct ctlr_info *h; in complete_scsi_command() local
2390 h = cp->h; in complete_scsi_command()
2392 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex]; in complete_scsi_command()
2396 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) in complete_scsi_command()
2397 hpsa_unmap_sg_chain_block(h, cp); in complete_scsi_command()
2401 hpsa_unmap_ioaccel2_sg_chain_block(h, c2); in complete_scsi_command()
2417 return hpsa_cmd_free_and_done(h, cp, cmd); in complete_scsi_command()
2422 return hpsa_cmd_resolve_and_free(h, cp); in complete_scsi_command()
2424 return hpsa_cmd_abort_and_free(h, cp, cmd); in complete_scsi_command()
2428 return process_ioaccel2_completion(h, cp, cmd, dev); in complete_scsi_command()
2432 return hpsa_cmd_free_and_done(h, cp, cmd); in complete_scsi_command()
2438 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; in complete_scsi_command()
2454 return hpsa_retry_cmd(h, cp); in complete_scsi_command()
2485 dev_warn(&h->pdev->dev, "cp %p has status 0x%x " in complete_scsi_command()
2492 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " in complete_scsi_command()
2514 dev_warn(&h->pdev->dev, in complete_scsi_command()
2531 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n", in complete_scsi_command()
2536 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n", in complete_scsi_command()
2541 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", in complete_scsi_command()
2546 return hpsa_cmd_abort_and_free(h, cp, cmd); in complete_scsi_command()
2549 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", in complete_scsi_command()
2554 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", in complete_scsi_command()
2559 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n", in complete_scsi_command()
2564 dev_warn(&h->pdev->dev, "Command unabortable\n"); in complete_scsi_command()
2567 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */ in complete_scsi_command()
2575 dev_warn(&h->pdev->dev, in complete_scsi_command()
2580 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", in complete_scsi_command()
2584 return hpsa_cmd_free_and_done(h, cp, cmd); in complete_scsi_command()
2629 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, in hpsa_scsi_do_simple_cmd_core() argument
2635 __enqueue_cmd_and_start_io(h, c, reply_queue); in hpsa_scsi_do_simple_cmd_core()
2643 dev_warn(&h->pdev->dev, "Command timed out.\n"); in hpsa_scsi_do_simple_cmd_core()
2649 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c, in hpsa_scsi_do_simple_cmd() argument
2652 if (unlikely(lockup_detected(h))) { in hpsa_scsi_do_simple_cmd()
2656 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs); in hpsa_scsi_do_simple_cmd()
2659 static u32 lockup_detected(struct ctlr_info *h) in lockup_detected() argument
2665 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); in lockup_detected()
2672 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, in hpsa_scsi_do_simple_cmd_with_retry() argument
2680 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, in hpsa_scsi_do_simple_cmd_with_retry()
2690 } while ((check_for_unit_attention(h, c) || in hpsa_scsi_do_simple_cmd_with_retry()
2691 check_for_busy(h, c)) && in hpsa_scsi_do_simple_cmd_with_retry()
2693 hpsa_pci_unmap(h->pdev, c, 1, data_direction); in hpsa_scsi_do_simple_cmd_with_retry()
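hpsa_scsi_do_simple_cmd_with_retry() resubmits while the completion indicates unit attention or busy, bounded by a retry cap. The loop's shape, with the predicate and cap as placeholders (the driver defines its own limit):

#include <stdio.h>

#define MAX_CMD_RETRIES 3               /* assumed cap for illustration */

static int submit_once(int attempt)     /* stand-in for the real submission */
{
    return attempt < 2;                 /* pretend: busy twice, then clean */
}

static int do_simple_cmd_with_retry(void)
{
    int retry_count = 0;
    int needs_retry;

    do {
        needs_retry = submit_once(retry_count); /* unit attention or busy? */
        retry_count++;
    } while (needs_retry && retry_count <= MAX_CMD_RETRIES);

    return needs_retry ? -1 : 0;        /* still failing after the cap? */
}

int main(void)
{
    printf("rc = %d\n", do_simple_cmd_with_retry());
    return 0;
}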
2699 static void hpsa_print_cmd(struct ctlr_info *h, char *txt, in hpsa_print_cmd() argument
2705 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" in hpsa_print_cmd()
2715 static void hpsa_scsi_interpret_error(struct ctlr_info *h, in hpsa_scsi_interpret_error() argument
2719 struct device *d = &cp->h->pdev->dev; in hpsa_scsi_interpret_error()
2731 hpsa_print_cmd(h, "SCSI status", cp); in hpsa_scsi_interpret_error()
2746 hpsa_print_cmd(h, "overrun condition", cp); in hpsa_scsi_interpret_error()
2752 hpsa_print_cmd(h, "invalid command", cp); in hpsa_scsi_interpret_error()
2757 hpsa_print_cmd(h, "protocol error", cp); in hpsa_scsi_interpret_error()
2760 hpsa_print_cmd(h, "hardware error", cp); in hpsa_scsi_interpret_error()
2763 hpsa_print_cmd(h, "connection lost", cp); in hpsa_scsi_interpret_error()
2766 hpsa_print_cmd(h, "aborted", cp); in hpsa_scsi_interpret_error()
2769 hpsa_print_cmd(h, "abort failed", cp); in hpsa_scsi_interpret_error()
2772 hpsa_print_cmd(h, "unsolicited abort", cp); in hpsa_scsi_interpret_error()
2775 hpsa_print_cmd(h, "timed out", cp); in hpsa_scsi_interpret_error()
2778 hpsa_print_cmd(h, "unabortable", cp); in hpsa_scsi_interpret_error()
2781 hpsa_print_cmd(h, "controller lockup detected", cp); in hpsa_scsi_interpret_error()
2784 hpsa_print_cmd(h, "unknown status", cp); in hpsa_scsi_interpret_error()
2790 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, in hpsa_scsi_do_inquiry() argument
2798 c = cmd_alloc(h); in hpsa_scsi_do_inquiry()
2800 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, in hpsa_scsi_do_inquiry()
2805 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, in hpsa_scsi_do_inquiry()
2811 hpsa_scsi_interpret_error(h, c); in hpsa_scsi_do_inquiry()
2815 cmd_free(h, c); in hpsa_scsi_do_inquiry()
2819 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, in hpsa_send_reset() argument
2826 c = cmd_alloc(h); in hpsa_send_reset()
2830 (void) fill_cmd(c, reset_type, h, NULL, 0, 0, in hpsa_send_reset()
2832 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); in hpsa_send_reset()
2834 dev_warn(&h->pdev->dev, "Failed to send reset command\n"); in hpsa_send_reset()
2841 hpsa_scsi_interpret_error(h, c); in hpsa_send_reset()
2845 cmd_free(h, c); in hpsa_send_reset()
2849 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, in hpsa_cmd_dev_match() argument
2855 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; in hpsa_cmd_dev_match()
2900 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n", in hpsa_cmd_dev_match()
2908 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, in hpsa_do_reset() argument
2915 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) { in hpsa_do_reset()
2916 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n"); in hpsa_do_reset()
2922 for (i = 0; i < h->nr_cmds; i++) { in hpsa_do_reset()
2923 struct CommandList *c = h->cmd_pool + i; in hpsa_do_reset()
2926 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) { in hpsa_do_reset()
2936 spin_lock_irqsave(&h->lock, flags); /* Implied MB */ in hpsa_do_reset()
2941 spin_unlock_irqrestore(&h->lock, flags); in hpsa_do_reset()
2944 cmd_free(h, c); in hpsa_do_reset()
2947 rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue); in hpsa_do_reset()
2949 wait_event(h->event_sync_wait_queue, in hpsa_do_reset()
2951 lockup_detected(h)); in hpsa_do_reset()
2953 if (unlikely(lockup_detected(h))) { in hpsa_do_reset()
2954 dev_warn(&h->pdev->dev, in hpsa_do_reset()
2962 mutex_unlock(&h->reset_mutex); in hpsa_do_reset()
2966 static void hpsa_get_raid_level(struct ctlr_info *h, in hpsa_get_raid_level() argument
2976 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64); in hpsa_get_raid_level()
2987 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, in hpsa_debug_map_buff() argument
2998 if (h->raid_offload_debug < 2) in hpsa_debug_map_buff()
3001 dev_info(&h->pdev->dev, "structure_size = %u\n", in hpsa_debug_map_buff()
3003 dev_info(&h->pdev->dev, "volume_blk_size = %u\n", in hpsa_debug_map_buff()
3005 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", in hpsa_debug_map_buff()
3007 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", in hpsa_debug_map_buff()
3009 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", in hpsa_debug_map_buff()
3011 dev_info(&h->pdev->dev, "strip_size = %u\n", in hpsa_debug_map_buff()
3013 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", in hpsa_debug_map_buff()
3015 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", in hpsa_debug_map_buff()
3017 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", in hpsa_debug_map_buff()
3019 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", in hpsa_debug_map_buff()
3021 dev_info(&h->pdev->dev, "row_cnt = %u\n", in hpsa_debug_map_buff()
3023 dev_info(&h->pdev->dev, "layout_map_count = %u\n", in hpsa_debug_map_buff()
3025 dev_info(&h->pdev->dev, "flags = 0x%x\n", in hpsa_debug_map_buff()
3027 dev_info(&h->pdev->dev, "encryption = %s\n", in hpsa_debug_map_buff()
3030 dev_info(&h->pdev->dev, "dekindex = %u\n", in hpsa_debug_map_buff()
3034 dev_info(&h->pdev->dev, "Map%u:\n", map); in hpsa_debug_map_buff()
3037 dev_info(&h->pdev->dev, " Row%u:\n", row); in hpsa_debug_map_buff()
3041 dev_info(&h->pdev->dev, in hpsa_debug_map_buff()
3048 dev_info(&h->pdev->dev, in hpsa_debug_map_buff()
3056 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, in hpsa_debug_map_buff() argument
3063 static int hpsa_get_raid_map(struct ctlr_info *h, in hpsa_get_raid_map() argument
3070 c = cmd_alloc(h); in hpsa_get_raid_map()
3072 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, in hpsa_get_raid_map()
3075 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n"); in hpsa_get_raid_map()
3076 cmd_free(h, c); in hpsa_get_raid_map()
3079 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, in hpsa_get_raid_map()
3085 hpsa_scsi_interpret_error(h, c); in hpsa_get_raid_map()
3089 cmd_free(h, c); in hpsa_get_raid_map()
3094 dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); in hpsa_get_raid_map()
3097 hpsa_debug_map_buff(h, rc, &this_device->raid_map); in hpsa_get_raid_map()
3100 cmd_free(h, c); in hpsa_get_raid_map()
3104 static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h, in hpsa_bmic_sense_subsystem_information() argument
3112 c = cmd_alloc(h); in hpsa_bmic_sense_subsystem_information()
3114 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize, in hpsa_bmic_sense_subsystem_information()
3122 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, in hpsa_bmic_sense_subsystem_information()
3128 hpsa_scsi_interpret_error(h, c); in hpsa_bmic_sense_subsystem_information()
3132 cmd_free(h, c); in hpsa_bmic_sense_subsystem_information()
3136 static int hpsa_bmic_id_controller(struct ctlr_info *h, in hpsa_bmic_id_controller() argument
3143 c = cmd_alloc(h); in hpsa_bmic_id_controller()
3145 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize, in hpsa_bmic_id_controller()
3150 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, in hpsa_bmic_id_controller()
3156 hpsa_scsi_interpret_error(h, c); in hpsa_bmic_id_controller()
3160 cmd_free(h, c); in hpsa_bmic_id_controller()
3164 static int hpsa_bmic_id_physical_device(struct ctlr_info *h, in hpsa_bmic_id_physical_device() argument
3172 c = cmd_alloc(h); in hpsa_bmic_id_physical_device()
3173 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize, in hpsa_bmic_id_physical_device()
3181 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, in hpsa_bmic_id_physical_device()
3185 hpsa_scsi_interpret_error(h, c); in hpsa_bmic_id_physical_device()
3189 cmd_free(h, c); in hpsa_bmic_id_physical_device()
3194 static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h, in hpsa_get_sas_address_from_report_physical() argument
3206 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { in hpsa_get_sas_address_from_report_physical()
3207 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); in hpsa_get_sas_address_from_report_physical()
3224 static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr, in hpsa_get_sas_address() argument
3235 dev_warn(&h->pdev->dev, in hpsa_get_sas_address()
3240 rc = hpsa_bmic_sense_subsystem_information(h, in hpsa_get_sas_address()
3244 h->sas_address = sa; in hpsa_get_sas_address()
3249 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr); in hpsa_get_sas_address()
3255 static int hpsa_vpd_page_supported(struct ctlr_info *h, in hpsa_vpd_page_supported() argument
3268 rc = hpsa_scsi_do_inquiry(h, scsi3addr, in hpsa_vpd_page_supported()
3280 rc = hpsa_scsi_do_inquiry(h, scsi3addr, in hpsa_vpd_page_supported()
3298 static void hpsa_get_ioaccel_status(struct ctlr_info *h, in hpsa_get_ioaccel_status() argument
3312 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) in hpsa_get_ioaccel_status()
3314 rc = hpsa_scsi_do_inquiry(h, scsi3addr, in hpsa_get_ioaccel_status()
3328 if (hpsa_get_raid_map(h, scsi3addr, this_device)) in hpsa_get_ioaccel_status()
3338 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, in hpsa_get_device_id() argument
3349 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); in hpsa_get_device_id()
3358 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, in hpsa_scsi_do_report_luns() argument
3367 c = cmd_alloc(h); in hpsa_scsi_do_report_luns()
3371 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, in hpsa_scsi_do_report_luns()
3378 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, in hpsa_scsi_do_report_luns()
3385 hpsa_scsi_interpret_error(h, c); in hpsa_scsi_do_report_luns()
3391 dev_err(&h->pdev->dev, in hpsa_scsi_do_report_luns()
3399 cmd_free(h, c); in hpsa_scsi_do_report_luns()
3403 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, in hpsa_scsi_do_report_phys_luns() argument
3406 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, in hpsa_scsi_do_report_phys_luns()
3410 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, in hpsa_scsi_do_report_log_luns() argument
3413 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); in hpsa_scsi_do_report_log_luns()
3425 static int hpsa_get_volume_status(struct ctlr_info *h, in hpsa_get_volume_status() argument
3438 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) in hpsa_get_volume_status()
3442 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, in hpsa_get_volume_status()
3449 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, in hpsa_get_volume_status()
3469 static int hpsa_volume_offline(struct ctlr_info *h, in hpsa_volume_offline() argument
3483 c = cmd_alloc(h); in hpsa_volume_offline()
3485 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); in hpsa_volume_offline()
3486 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); in hpsa_volume_offline()
3488 cmd_free(h, c); in hpsa_volume_offline()
3499 cmd_free(h, c); in hpsa_volume_offline()
3509 ldstat = hpsa_get_volume_status(h, scsi3addr); in hpsa_volume_offline()
3545 static int hpsa_device_supports_aborts(struct ctlr_info *h, in hpsa_device_supports_aborts() argument
3558 c = cmd_alloc(h); in hpsa_device_supports_aborts()
3560 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG); in hpsa_device_supports_aborts()
3561 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); in hpsa_device_supports_aborts()
3573 rc = hpsa_evaluate_tmf_status(h, c); in hpsa_device_supports_aborts()
3579 cmd_free(h, c); in hpsa_device_supports_aborts()
3595 static int hpsa_update_device_info(struct ctlr_info *h, in hpsa_update_device_info() argument
3616 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, in hpsa_update_device_info()
3619 dev_err(&h->pdev->dev, in hpsa_update_device_info()
3636 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, in hpsa_update_device_info()
3643 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); in hpsa_update_device_info()
3644 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) in hpsa_update_device_info()
3645 hpsa_get_ioaccel_status(h, scsi3addr, this_device); in hpsa_update_device_info()
3646 volume_offline = hpsa_volume_offline(h, scsi3addr); in hpsa_update_device_info()
3657 this_device->queue_depth = h->nr_cmds; in hpsa_update_device_info()
3677 static void hpsa_update_device_supports_aborts(struct ctlr_info *h, in hpsa_update_device_supports_aborts() argument
3687 spin_lock_irqsave(&h->devlock, flags); in hpsa_update_device_supports_aborts()
3688 rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry); in hpsa_update_device_supports_aborts()
3690 entry >= 0 && entry < h->ndevices) { in hpsa_update_device_supports_aborts()
3691 dev->supports_aborts = h->dev[entry]->supports_aborts; in hpsa_update_device_supports_aborts()
3692 spin_unlock_irqrestore(&h->devlock, flags); in hpsa_update_device_supports_aborts()
3694 spin_unlock_irqrestore(&h->devlock, flags); in hpsa_update_device_supports_aborts()
3696 hpsa_device_supports_aborts(h, scsi3addr); in hpsa_update_device_supports_aborts()
3708 static void figure_bus_target_lun(struct ctlr_info *h, in figure_bus_target_lun() argument
3744 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, in hpsa_get_pdisk_of_ioaccel2() argument
3748 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; in hpsa_get_pdisk_of_ioaccel2()
3752 spin_lock_irqsave(&h->devlock, flags); in hpsa_get_pdisk_of_ioaccel2()
3753 for (i = 0; i < h->ndevices; i++) in hpsa_get_pdisk_of_ioaccel2()
3754 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) { in hpsa_get_pdisk_of_ioaccel2()
3755 memcpy(scsi3addr, h->dev[i]->scsi3addr, in hpsa_get_pdisk_of_ioaccel2()
3756 sizeof(h->dev[i]->scsi3addr)); in hpsa_get_pdisk_of_ioaccel2()
3757 spin_unlock_irqrestore(&h->devlock, flags); in hpsa_get_pdisk_of_ioaccel2()
3760 spin_unlock_irqrestore(&h->devlock, flags); in hpsa_get_pdisk_of_ioaccel2()
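hpsa_get_pdisk_of_ioaccel2() scans h->dev[] under the devlock, copies the matching scsi3addr out while still holding the lock, then unlocks on both the hit and miss paths. The same pattern with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NDEVICES 4

struct dev { unsigned handle; char addr[8]; };

static struct dev devs[NDEVICES] = {
    { 0x10, "addr0" }, { 0x20, "addr1" }, { 0x30, "addr2" }, { 0x40, "addr3" },
};
static pthread_mutex_t devlock = PTHREAD_MUTEX_INITIALIZER;

static int find_by_handle(unsigned handle, char *out, size_t len)
{
    pthread_mutex_lock(&devlock);       /* spin_lock_irqsave() in the driver */
    for (int i = 0; i < NDEVICES; i++) {
        if (devs[i].handle == handle) {
            strncpy(out, devs[i].addr, len); /* copy while still locked */
            pthread_mutex_unlock(&devlock);
            return 0;
        }
    }
    pthread_mutex_unlock(&devlock);
    return -1;                          /* not found */
}

int main(void)
{
    char addr[8];
    if (find_by_handle(0x30, addr, sizeof(addr)) == 0)
        printf("found %s\n", addr);
    return 0;
}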
3764 static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position, in figure_external_status() argument
3791 static int hpsa_gather_lun_info(struct ctlr_info *h, in hpsa_gather_lun_info() argument
3795 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { in hpsa_gather_lun_info()
3796 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); in hpsa_gather_lun_info()
3801 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n", in hpsa_gather_lun_info()
3805 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) { in hpsa_gather_lun_info()
3806 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); in hpsa_gather_lun_info()
3812 dev_warn(&h->pdev->dev, in hpsa_gather_lun_info()
3819 dev_warn(&h->pdev->dev, in hpsa_gather_lun_info()
3828 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, in figure_lunaddrbytes() argument
3856 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, in hpsa_get_ioaccel_drive_info() argument
3868 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0], in hpsa_get_ioaccel_drive_info()
3909 static int hpsa_set_local_logical_count(struct ctlr_info *h, in hpsa_set_local_logical_count() argument
3916 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n", in hpsa_set_local_logical_count()
3921 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); in hpsa_set_local_logical_count()
3934 static void hpsa_update_scsi_devices(struct ctlr_info *h) in hpsa_update_scsi_devices() argument
3970 dev_err(&h->pdev->dev, "out of memory\n"); in hpsa_update_scsi_devices()
3975 h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */ in hpsa_update_scsi_devices()
3977 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals, in hpsa_update_scsi_devices()
3979 h->drv_req_rescan = 1; in hpsa_update_scsi_devices()
3984 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) { in hpsa_update_scsi_devices()
3985 dev_warn(&h->pdev->dev, in hpsa_update_scsi_devices()
3999 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." in hpsa_update_scsi_devices()
4007 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", in hpsa_update_scsi_devices()
4009 h->drv_req_rescan = 1; in hpsa_update_scsi_devices()
4015 if (is_scsi_rev_5(h)) in hpsa_update_scsi_devices()
4030 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, in hpsa_update_scsi_devices()
4039 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice, in hpsa_update_scsi_devices()
4042 dev_warn(&h->pdev->dev, in hpsa_update_scsi_devices()
4044 h->drv_req_rescan = 1; in hpsa_update_scsi_devices()
4048 dev_warn(&h->pdev->dev, in hpsa_update_scsi_devices()
4055 figure_external_status(h, raid_ctlr_position, i, in hpsa_update_scsi_devices()
4058 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); in hpsa_update_scsi_devices()
4059 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes); in hpsa_update_scsi_devices()
4065 if (!h->discovery_polling) { in hpsa_update_scsi_devices()
4067 h->discovery_polling = 1; in hpsa_update_scsi_devices()
4068 dev_info(&h->pdev->dev, in hpsa_update_scsi_devices()
4091 hpsa_get_sas_address(h, lunaddrbytes, this_device); in hpsa_update_scsi_devices()
4110 hpsa_get_ioaccel_drive_info(h, this_device, in hpsa_update_scsi_devices()
4139 if (h->sas_host == NULL) { in hpsa_update_scsi_devices()
4142 rc = hpsa_add_sas_host(h); in hpsa_update_scsi_devices()
4144 dev_warn(&h->pdev->dev, in hpsa_update_scsi_devices()
4150 adjust_hpsa_scsi_table(h, currentsd, ncurrent); in hpsa_update_scsi_devices()
4178 static int hpsa_scatter_gather(struct ctlr_info *h, in hpsa_scatter_gather() argument
4186 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); in hpsa_scatter_gather()
4203 chained = use_sg > h->max_cmd_sg_entries; in hpsa_scatter_gather()
4204 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg; in hpsa_scatter_gather()
4218 curr_sg = h->cmd_sg_list[cp->cmdindex]; in hpsa_scatter_gather()
4229 if (use_sg + chained > h->maxSG) in hpsa_scatter_gather()
4230 h->maxSG = use_sg + chained; in hpsa_scatter_gather()
4233 cp->Header.SGList = h->max_cmd_sg_entries; in hpsa_scatter_gather()
4235 if (hpsa_map_sg_chain_block(h, cp)) { in hpsa_scatter_gather()
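hpsa_scatter_gather() decides whether to chain: if the request needs more SG entries than fit in the command itself, the last embedded slot becomes a pointer to a chain block holding the remainder (h->max_cmd_sg_entries - 1 inline, the rest chained). The arithmetic in isolation:

#include <stdio.h>

#define MAX_CMD_SG_ENTRIES 32           /* stand-in for h->max_cmd_sg_entries */

static void plan_sg(int use_sg)
{
    int chained = use_sg > MAX_CMD_SG_ENTRIES;
    int embedded = chained ? MAX_CMD_SG_ENTRIES - 1 : use_sg;

    printf("use_sg=%d: %d embedded, %d in chain block\n",
           use_sg, embedded, chained ? use_sg - embedded : 0);
}

int main(void)
{
    plan_sg(8);                          /* fits inline */
    plan_sg(100);                        /* spills into a chain block */
    return 0;
}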
4292 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, in hpsa_scsi_ioaccel1_queue_command() argument
4297 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; in hpsa_scsi_ioaccel1_queue_command()
4307 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { in hpsa_scsi_ioaccel1_queue_command()
4322 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + in hpsa_scsi_ioaccel1_queue_command()
4356 dev_err(&h->pdev->dev, "unknown data direction: %d\n", in hpsa_scsi_ioaccel1_queue_command()
4375 enqueue_cmd_and_start_io(h, c); in hpsa_scsi_ioaccel1_queue_command()
4383 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, in hpsa_scsi_ioaccel_direct_map() argument
4391 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, in hpsa_scsi_ioaccel_direct_map()
4398 static void set_encrypt_ioaccel2(struct ctlr_info *h, in set_encrypt_ioaccel2() argument
4437 dev_err(&h->pdev->dev, in set_encrypt_ioaccel2()
4452 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, in hpsa_scsi_ioaccel2_queue_command() argument
4457 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; in hpsa_scsi_ioaccel2_queue_command()
4465 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); in hpsa_scsi_ioaccel2_queue_command()
4474 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + in hpsa_scsi_ioaccel2_queue_command()
4489 if (use_sg > h->ioaccel_maxsg) { in hpsa_scsi_ioaccel2_queue_command()
4491 h->ioaccel2_cmd_sg_list[c->cmdindex]->address); in hpsa_scsi_ioaccel2_queue_command()
4499 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex]; in hpsa_scsi_ioaccel2_queue_command()
4528 dev_err(&h->pdev->dev, "unknown data direction: %d\n", in hpsa_scsi_ioaccel2_queue_command()
4539 set_encrypt_ioaccel2(h, c, cp); in hpsa_scsi_ioaccel2_queue_command()
4551 if (use_sg > h->ioaccel_maxsg) { in hpsa_scsi_ioaccel2_queue_command()
4554 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) { in hpsa_scsi_ioaccel2_queue_command()
4562 enqueue_cmd_and_start_io(h, c); in hpsa_scsi_ioaccel2_queue_command()
4569 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, in hpsa_scsi_ioaccel_queue_command() argument
4579 if (h->transMethod & CFGTBL_Trans_io_accel1) in hpsa_scsi_ioaccel_queue_command()
4580 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, in hpsa_scsi_ioaccel_queue_command()
4584 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, in hpsa_scsi_ioaccel_queue_command()
4618 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, in hpsa_scsi_ioaccel_raid_map() argument
4937 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, in hpsa_scsi_ioaccel_raid_map()
4947 static int hpsa_ciss_submit(struct ctlr_info *h, in hpsa_ciss_submit() argument
4996 dev_err(&h->pdev->dev, "unknown data direction: %d\n", in hpsa_ciss_submit()
5002 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ in hpsa_ciss_submit()
5003 hpsa_cmd_resolve_and_free(h, c); in hpsa_ciss_submit()
5006 enqueue_cmd_and_start_io(h, c); in hpsa_ciss_submit()
5011 static void hpsa_cmd_init(struct ctlr_info *h, int index, in hpsa_cmd_init() argument
5019 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); in hpsa_cmd_init()
5020 c->err_info = h->errinfo_pool + index; in hpsa_cmd_init()
5022 err_dma_handle = h->errinfo_pool_dhandle in hpsa_cmd_init()
5028 c->h = h; in hpsa_cmd_init()
5032 static void hpsa_preinitialize_commands(struct ctlr_info *h) in hpsa_preinitialize_commands() argument
5036 for (i = 0; i < h->nr_cmds; i++) { in hpsa_preinitialize_commands()
5037 struct CommandList *c = h->cmd_pool + i; in hpsa_preinitialize_commands()
5039 hpsa_cmd_init(h, i, c); in hpsa_preinitialize_commands()
5044 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index, in hpsa_cmd_partial_init() argument
5047 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); in hpsa_cmd_partial_init()
5056 static int hpsa_ioaccel_submit(struct ctlr_info *h, in hpsa_ioaccel_submit() argument
5066 hpsa_cmd_init(h, c->cmdindex, c); in hpsa_ioaccel_submit()
5069 rc = hpsa_scsi_ioaccel_raid_map(h, c); in hpsa_ioaccel_submit()
5073 hpsa_cmd_init(h, c->cmdindex, c); in hpsa_ioaccel_submit()
5076 rc = hpsa_scsi_ioaccel_direct_map(h, c); in hpsa_ioaccel_submit()
5093 return hpsa_cmd_free_and_done(c->h, c, cmd); in hpsa_command_resubmit_worker()
5096 return hpsa_cmd_resolve_and_free(c->h, c); in hpsa_command_resubmit_worker()
5098 return hpsa_cmd_abort_and_free(c->h, c, cmd); in hpsa_command_resubmit_worker()
5100 struct ctlr_info *h = c->h; in hpsa_command_resubmit_worker() local
5101 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; in hpsa_command_resubmit_worker()
5106 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr); in hpsa_command_resubmit_worker()
5116 return hpsa_cmd_free_and_done(h, c, cmd); in hpsa_command_resubmit_worker()
5121 hpsa_cmd_partial_init(c->h, c->cmdindex, c); in hpsa_command_resubmit_worker()
5122 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { in hpsa_command_resubmit_worker()
5139 struct ctlr_info *h; in hpsa_scsi_queue_command() local
5146 h = sdev_to_hba(cmd->device); in hpsa_scsi_queue_command()
5159 if (unlikely(lockup_detected(h))) { in hpsa_scsi_queue_command()
5164 c = cmd_tagged_alloc(h, cmd); in hpsa_scsi_queue_command()
5172 h->acciopath_status)) { in hpsa_scsi_queue_command()
5173 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr); in hpsa_scsi_queue_command()
5177 hpsa_cmd_resolve_and_free(h, c); in hpsa_scsi_queue_command()
5181 return hpsa_ciss_submit(h, c, cmd, scsi3addr); in hpsa_scsi_queue_command()
5184 static void hpsa_scan_complete(struct ctlr_info *h) in hpsa_scan_complete() argument
5188 spin_lock_irqsave(&h->scan_lock, flags); in hpsa_scan_complete()
5189 h->scan_finished = 1; in hpsa_scan_complete()
5190 wake_up_all(&h->scan_wait_queue); in hpsa_scan_complete()
5191 spin_unlock_irqrestore(&h->scan_lock, flags); in hpsa_scan_complete()
5196 struct ctlr_info *h = shost_to_hba(sh); in hpsa_scan_start() local
5205 if (unlikely(lockup_detected(h))) in hpsa_scan_start()
5206 return hpsa_scan_complete(h); in hpsa_scan_start()
5210 spin_lock_irqsave(&h->scan_lock, flags); in hpsa_scan_start()
5211 if (h->scan_finished) in hpsa_scan_start()
5213 spin_unlock_irqrestore(&h->scan_lock, flags); in hpsa_scan_start()
5214 wait_event(h->scan_wait_queue, h->scan_finished); in hpsa_scan_start()
5221 h->scan_finished = 0; /* mark scan as in progress */ in hpsa_scan_start()
5222 spin_unlock_irqrestore(&h->scan_lock, flags); in hpsa_scan_start()
5224 if (unlikely(lockup_detected(h))) in hpsa_scan_start()
5225 return hpsa_scan_complete(h); in hpsa_scan_start()
5227 hpsa_update_scsi_devices(h); in hpsa_scan_start()
5229 hpsa_scan_complete(h); in hpsa_scan_start()
5250 struct ctlr_info *h = shost_to_hba(sh); in hpsa_scan_finished() local
5254 spin_lock_irqsave(&h->scan_lock, flags); in hpsa_scan_finished()
5255 finished = h->scan_finished; in hpsa_scan_finished()
5256 spin_unlock_irqrestore(&h->scan_lock, flags); in hpsa_scan_finished()
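hpsa_scan_start() and hpsa_scan_complete() implement a simple handshake around h->scan_finished: a new scan waits out any scan already running, clears the flag, rescans, then sets the flag and wakes all waiters. A pthread model of the handshake, with condition variables standing in for the kernel wait queue:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t scan_wait = PTHREAD_COND_INITIALIZER;
static int scan_finished = 1;

static void scan_complete(void)
{
    pthread_mutex_lock(&scan_lock);
    scan_finished = 1;
    pthread_cond_broadcast(&scan_wait); /* wake_up_all(&h->scan_wait_queue) */
    pthread_mutex_unlock(&scan_lock);
}

static void scan_start(void)
{
    pthread_mutex_lock(&scan_lock);
    while (!scan_finished)              /* wait out a scan already running */
        pthread_cond_wait(&scan_wait, &scan_lock);
    scan_finished = 0;                  /* mark scan as in progress */
    pthread_mutex_unlock(&scan_lock);

    puts("rescanning devices...");      /* hpsa_update_scsi_devices() */
    scan_complete();
}

int main(void)
{
    scan_start();
    return 0;
}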
5260 static int hpsa_scsi_host_alloc(struct ctlr_info *h) in hpsa_scsi_host_alloc() argument
5264 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); in hpsa_scsi_host_alloc()
5266 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); in hpsa_scsi_host_alloc()
5277 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS; in hpsa_scsi_host_alloc()
5279 sh->sg_tablesize = h->maxsgentries; in hpsa_scsi_host_alloc()
5281 sh->hostdata[0] = (unsigned long) h; in hpsa_scsi_host_alloc()
5282 sh->irq = h->intr[h->intr_mode]; in hpsa_scsi_host_alloc()
5285 h->scsi_host = sh; in hpsa_scsi_host_alloc()
5289 static int hpsa_scsi_add_host(struct ctlr_info *h) in hpsa_scsi_add_host() argument
5293 rv = scsi_add_host(h->scsi_host, &h->pdev->dev); in hpsa_scsi_add_host()
5295 dev_err(&h->pdev->dev, "scsi_add_host failed\n"); in hpsa_scsi_add_host()
5298 scsi_scan_host(h->scsi_host); in hpsa_scsi_add_host()
5323 static int hpsa_send_test_unit_ready(struct ctlr_info *h, in hpsa_send_test_unit_ready() argument
5330 (void) fill_cmd(c, TEST_UNIT_READY, h, in hpsa_send_test_unit_ready()
5332 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); in hpsa_send_test_unit_ready()
5359 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h, in hpsa_wait_for_test_unit_ready() argument
5376 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue); in hpsa_wait_for_test_unit_ready()
5384 dev_warn(&h->pdev->dev, in hpsa_wait_for_test_unit_ready()
5392 static int wait_for_device_to_become_ready(struct ctlr_info *h, in wait_for_device_to_become_ready() argument
5402 c = cmd_alloc(h); in wait_for_device_to_become_ready()
5411 last_queue = h->nreply_queues - 1; in wait_for_device_to_become_ready()
5418 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq); in wait_for_device_to_become_ready()
5424 dev_warn(&h->pdev->dev, "giving up on device.\n"); in wait_for_device_to_become_ready()
5426 dev_warn(&h->pdev->dev, "device is ready.\n"); in wait_for_device_to_become_ready()
5428 cmd_free(h, c); in wait_for_device_to_become_ready()
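
wait_for_device_to_become_ready retries TEST UNIT READY with a delay between attempts, rotating the retries across the controller's reply queues before giving up. A compact sketch of that bounded retry loop; test_unit_ready is a hypothetical probe that pretends the device becomes ready on the third attempt:

#include <stdio.h>
#include <unistd.h>

#define MAX_TRIES 5
#define NQUEUES   4

static int test_unit_ready(int attempt, int queue)
{
	printf("TUR attempt %d on reply queue %d\n", attempt, queue);
	return attempt >= 3 ? 0 : -1;
}

static int wait_for_ready(void)
{
	for (int i = 1; i <= MAX_TRIES; i++) {
		int queue = i % NQUEUES;	/* spread retries across queues */
		if (test_unit_ready(i, queue) == 0) {
			puts("device is ready.");
			return 0;
		}
		sleep(1);			/* back off before retrying */
	}
	puts("giving up on device.");
	return -1;
}

int main(void) { return wait_for_ready(); }
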
5438 struct ctlr_info *h; in hpsa_eh_device_reset_handler() local
5444 h = sdev_to_hba(scsicmd->device); in hpsa_eh_device_reset_handler()
5445 if (h == NULL) /* paranoia */ in hpsa_eh_device_reset_handler()
5448 if (lockup_detected(h)) in hpsa_eh_device_reset_handler()
5453 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__); in hpsa_eh_device_reset_handler()
5458 if (lockup_detected(h)) { in hpsa_eh_device_reset_handler()
5462 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); in hpsa_eh_device_reset_handler()
5467 if (detect_controller_lockup(h)) { in hpsa_eh_device_reset_handler()
5471 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); in hpsa_eh_device_reset_handler()
5486 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); in hpsa_eh_device_reset_handler()
5488 h->reset_in_progress = 1; in hpsa_eh_device_reset_handler()
5491 rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type, in hpsa_eh_device_reset_handler()
5496 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); in hpsa_eh_device_reset_handler()
5497 h->reset_in_progress = 0; in hpsa_eh_device_reset_handler()
5516 static void hpsa_get_tag(struct ctlr_info *h, in hpsa_get_tag() argument
5522 &h->ioaccel_cmd_pool[c->cmdindex]; in hpsa_get_tag()
5530 &h->ioaccel2_cmd_pool[c->cmdindex]; in hpsa_get_tag()
5541 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, in hpsa_send_abort() argument
5549 c = cmd_alloc(h); in hpsa_send_abort()
5552 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag, in hpsa_send_abort()
5554 if (h->needs_abort_tags_swizzled) in hpsa_send_abort()
5556 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); in hpsa_send_abort()
5557 hpsa_get_tag(h, abort, &taglower, &tagupper); in hpsa_send_abort()
5558 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n", in hpsa_send_abort()
5567 rc = hpsa_evaluate_tmf_status(h, c); in hpsa_send_abort()
5573 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", in hpsa_send_abort()
5575 hpsa_scsi_interpret_error(h, c); in hpsa_send_abort()
5579 cmd_free(h, c); in hpsa_send_abort()
5580 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", in hpsa_send_abort()
5585 static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h, in setup_ioaccel2_abort_cmd() argument
5588 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; in setup_ioaccel2_abort_cmd()
5591 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex]; in setup_ioaccel2_abort_cmd()
5610 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + in setup_ioaccel2_abort_cmd()
5634 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, in hpsa_send_reset_as_abort_ioaccel2() argument
5647 dev_warn(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
5652 if (h->raid_offload_debug > 0) in hpsa_send_reset_as_abort_ioaccel2()
5653 dev_info(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
5655 h->scsi_host->host_no, dev->bus, dev->target, dev->lun, in hpsa_send_reset_as_abort_ioaccel2()
5661 dev_warn(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
5667 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { in hpsa_send_reset_as_abort_ioaccel2()
5668 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); in hpsa_send_reset_as_abort_ioaccel2()
5673 if (h->raid_offload_debug > 0) in hpsa_send_reset_as_abort_ioaccel2()
5674 dev_info(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
5678 rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue); in hpsa_send_reset_as_abort_ioaccel2()
5680 dev_warn(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
5688 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) { in hpsa_send_reset_as_abort_ioaccel2()
5689 dev_warn(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
5697 dev_info(&h->pdev->dev, in hpsa_send_reset_as_abort_ioaccel2()
5705 static int hpsa_send_abort_ioaccel2(struct ctlr_info *h, in hpsa_send_abort_ioaccel2() argument
5718 c = cmd_alloc(h); in hpsa_send_abort_ioaccel2()
5719 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue); in hpsa_send_abort_ioaccel2()
5720 c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; in hpsa_send_abort_ioaccel2()
5721 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); in hpsa_send_abort_ioaccel2()
5722 hpsa_get_tag(h, abort, &taglower, &tagupper); in hpsa_send_abort_ioaccel2()
5723 dev_dbg(&h->pdev->dev, in hpsa_send_abort_ioaccel2()
5728 dev_dbg(&h->pdev->dev, in hpsa_send_abort_ioaccel2()
5742 dev_warn(&h->pdev->dev, in hpsa_send_abort_ioaccel2()
5748 cmd_free(h, c); in hpsa_send_abort_ioaccel2()
5749 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, in hpsa_send_abort_ioaccel2()
5754 static int hpsa_send_abort_both_ways(struct ctlr_info *h, in hpsa_send_abort_both_ways() argument
5764 if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) in hpsa_send_abort_both_ways()
5765 return hpsa_send_abort_ioaccel2(h, abort, in hpsa_send_abort_both_ways()
5768 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, in hpsa_send_abort_both_ways()
5771 return hpsa_send_abort(h, scsi3addr, abort, reply_queue); in hpsa_send_abort_both_ways()
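
hpsa_send_abort_both_ways picks one of three strategies: a command issued down the ioaccel2 path is aborted with the controller's ioaccel2 TMF when HPSATMF_IOACCEL_ENABLED is set, or emulated with a target reset of the backing physical disk otherwise, while anything else gets a plain CISS abort message. A small dispatch sketch with hypothetical stubs for the three cases:

#include <stdio.h>

enum path { PATH_CISS, PATH_IOACCEL2 };

#define TMF_IOACCEL_ENABLED 0x1	/* stand-in for the capability bit */

static int abort_ioaccel2(void)	{ puts("ioaccel2 TMF abort"); return 0; }
static int reset_as_abort(void)	{ puts("target reset of physical disk"); return 0; }
static int abort_ciss(void)	{ puts("CISS abort message"); return 0; }

static int send_abort_both_ways(enum path p, unsigned tmf_flags)
{
	if (p == PATH_IOACCEL2)
		return (tmf_flags & TMF_IOACCEL_ENABLED) ?
			abort_ioaccel2() : reset_as_abort();
	return abort_ciss();
}

int main(void)
{
	send_abort_both_ways(PATH_IOACCEL2, TMF_IOACCEL_ENABLED);
	send_abort_both_ways(PATH_IOACCEL2, 0);
	send_abort_both_ways(PATH_CISS, 0);
	return 0;
}
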
5775 static int hpsa_extract_reply_queue(struct ctlr_info *h, in hpsa_extract_reply_queue() argument
5779 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue; in hpsa_extract_reply_queue()
5787 static inline int wait_for_available_abort_cmd(struct ctlr_info *h) in wait_for_available_abort_cmd() argument
5790 return !wait_event_timeout(h->abort_cmd_wait_queue, in wait_for_available_abort_cmd()
5791 atomic_dec_if_positive(&h->abort_cmds_available) >= 0, in wait_for_available_abort_cmd()
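
wait_for_available_abort_cmd gates aborts on a small pool of reserved command slots: it blocks (with a timeout) until atomic_dec_if_positive succeeds on abort_cmds_available, and the abort handler later increments the counter and wakes waiters. The sketch below implements the dec-if-positive primitive with a C11 compare-and-swap loop; the kernel's wait_event_timeout sleep/wake machinery is elided:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int avail = 2;	/* reserved abort slots */

/* Decrement only if the result stays >= 0; returns 1 on success. */
static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);
	while (old > 0) {
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return 1;
		/* old was reloaded by the failed CAS; retry */
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("request %d: %s\n", i,
		       dec_if_positive(&avail) ? "granted" : "would wait");
	atomic_fetch_add(&avail, 1);	/* release a slot, then wake waiters */
	printf("after release: %s\n",
	       dec_if_positive(&avail) ? "granted" : "would wait");
	return 0;
}
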
5803 struct ctlr_info *h; in hpsa_eh_abort_handler() local
5819 h = sdev_to_hba(sc->device); in hpsa_eh_abort_handler()
5820 if (h == NULL) in hpsa_eh_abort_handler()
5826 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n", in hpsa_eh_abort_handler()
5832 if (lockup_detected(h)) { in hpsa_eh_abort_handler()
5833 hpsa_show_dev_msg(KERN_WARNING, h, dev, in hpsa_eh_abort_handler()
5839 if (detect_controller_lockup(h)) { in hpsa_eh_abort_handler()
5840 hpsa_show_dev_msg(KERN_WARNING, h, dev, in hpsa_eh_abort_handler()
5846 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && in hpsa_eh_abort_handler()
5847 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) in hpsa_eh_abort_handler()
5852 h->scsi_host->host_no, sc->device->channel, in hpsa_eh_abort_handler()
5864 cmd_free(h, abort); in hpsa_eh_abort_handler()
5871 cmd_free(h, abort); in hpsa_eh_abort_handler()
5880 cmd_free(h, abort); in hpsa_eh_abort_handler()
5885 hpsa_get_tag(h, abort, &taglower, &tagupper); in hpsa_eh_abort_handler()
5886 reply_queue = hpsa_extract_reply_queue(h, abort); in hpsa_eh_abort_handler()
5894 dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg); in hpsa_eh_abort_handler()
5895 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command"); in hpsa_eh_abort_handler()
5902 if (wait_for_available_abort_cmd(h)) { in hpsa_eh_abort_handler()
5903 dev_warn(&h->pdev->dev, in hpsa_eh_abort_handler()
5906 cmd_free(h, abort); in hpsa_eh_abort_handler()
5909 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue); in hpsa_eh_abort_handler()
5910 atomic_inc(&h->abort_cmds_available); in hpsa_eh_abort_handler()
5911 wake_up_all(&h->abort_cmd_wait_queue); in hpsa_eh_abort_handler()
5913 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg); in hpsa_eh_abort_handler()
5914 hpsa_show_dev_msg(KERN_WARNING, h, dev, in hpsa_eh_abort_handler()
5916 cmd_free(h, abort); in hpsa_eh_abort_handler()
5919 dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg); in hpsa_eh_abort_handler()
5920 wait_event(h->event_sync_wait_queue, in hpsa_eh_abort_handler()
5921 abort->scsi_cmd != sc || lockup_detected(h)); in hpsa_eh_abort_handler()
5922 cmd_free(h, abort); in hpsa_eh_abort_handler()
5923 return !lockup_detected(h) ? SUCCESS : FAILED; in hpsa_eh_abort_handler()
5932 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, in cmd_tagged_alloc() argument
5936 struct CommandList *c = h->cmd_pool + idx; in cmd_tagged_alloc()
5938 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) { in cmd_tagged_alloc()
5939 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n", in cmd_tagged_alloc()
5940 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1); in cmd_tagged_alloc()
5955 dev_err(&h->pdev->dev, in cmd_tagged_alloc()
5963 hpsa_cmd_partial_init(h, idx, c); in cmd_tagged_alloc()
5967 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) in cmd_tagged_free() argument
5987 static struct CommandList *cmd_alloc(struct ctlr_info *h) in cmd_alloc() argument
6013 i = find_next_zero_bit(h->cmd_pool_bits, in cmd_alloc()
6020 c = h->cmd_pool + i; in cmd_alloc()
6023 cmd_free(h, c); /* already in use */ in cmd_alloc()
6028 h->cmd_pool_bits + (i / BITS_PER_LONG)); in cmd_alloc()
6031 hpsa_cmd_partial_init(h, i, c); in cmd_alloc()
6041 static void cmd_free(struct ctlr_info *h, struct CommandList *c) in cmd_free() argument
6046 i = c - h->cmd_pool; in cmd_free()
6048 h->cmd_pool_bits + (i / BITS_PER_LONG)); in cmd_free()
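
cmd_alloc hands out reserved commands from a bitmap: scan for a zero bit, claim it atomically, and rescan if the slot turns out to be in use; cmd_free computes the index from the pool base and clears the bit. A single-word userspace analogue using C11 atomics (the driver additionally keeps a per-command refcount and a rotating scan offset):

#include <stdatomic.h>
#include <stdio.h>

#define NSLOTS 8
static atomic_ulong pool_bits;	/* bit i set => slot i in use */

static int slot_alloc(void)
{
	unsigned long bits = atomic_load(&pool_bits);
	for (int i = 0; i < NSLOTS; i++) {
		if (bits & (1UL << i))
			continue;	/* find_next_zero_bit analogue */
		unsigned long old = atomic_fetch_or(&pool_bits, 1UL << i);
		if (!(old & (1UL << i)))
			return i;	/* claimed the slot */
		bits = old;		/* lost the race; keep scanning */
	}
	return -1;			/* pool exhausted */
}

static void slot_free(int i)
{
	atomic_fetch_and(&pool_bits, ~(1UL << i));	/* clear_bit analogue */
}

int main(void)
{
	int a = slot_alloc(), b = slot_alloc();
	printf("allocated slots %d and %d\n", a, b);
	slot_free(a);
	printf("reallocated slot %d\n", slot_alloc());
	return 0;
}
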
6159 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) in hpsa_getpciinfo_ioctl() argument
6165 pciinfo.domain = pci_domain_nr(h->pdev->bus); in hpsa_getpciinfo_ioctl()
6166 pciinfo.bus = h->pdev->bus->number; in hpsa_getpciinfo_ioctl()
6167 pciinfo.dev_fn = h->pdev->devfn; in hpsa_getpciinfo_ioctl()
6168 pciinfo.board_id = h->board_id; in hpsa_getpciinfo_ioctl()
6174 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) in hpsa_getdrivver_ioctl() argument
6183 dev_info(&h->pdev->dev, "driver version string '%s' " in hpsa_getdrivver_ioctl()
6197 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) in hpsa_passthru_ioctl() argument
6230 c = cmd_alloc(h); in hpsa_passthru_ioctl()
6252 temp64 = pci_map_single(h->pdev, buff, in hpsa_passthru_ioctl()
6254 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { in hpsa_passthru_ioctl()
6264 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); in hpsa_passthru_ioctl()
6266 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); in hpsa_passthru_ioctl()
6267 check_ioctl_unit_attention(h, c); in hpsa_passthru_ioctl()
6289 cmd_free(h, c); in hpsa_passthru_ioctl()
6295 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) in hpsa_big_passthru_ioctl() argument
6367 c = cmd_alloc(h); in hpsa_big_passthru_ioctl()
6379 temp64 = pci_map_single(h->pdev, buff[i], in hpsa_big_passthru_ioctl()
6381 if (dma_mapping_error(&h->pdev->dev, in hpsa_big_passthru_ioctl()
6385 hpsa_pci_unmap(h->pdev, c, i, in hpsa_big_passthru_ioctl()
6396 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); in hpsa_big_passthru_ioctl()
6398 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); in hpsa_big_passthru_ioctl()
6399 check_ioctl_unit_attention(h, c); in hpsa_big_passthru_ioctl()
6426 cmd_free(h, c); in hpsa_big_passthru_ioctl()
6440 static void check_ioctl_unit_attention(struct ctlr_info *h, in check_ioctl_unit_attention() argument
6445 (void) check_for_unit_attention(h, c); in check_ioctl_unit_attention()
6453 struct ctlr_info *h; in hpsa_ioctl() local
6457 h = sdev_to_hba(dev); in hpsa_ioctl()
6463 hpsa_scan_start(h->scsi_host); in hpsa_ioctl()
6466 return hpsa_getpciinfo_ioctl(h, argp); in hpsa_ioctl()
6468 return hpsa_getdrivver_ioctl(h, argp); in hpsa_ioctl()
6470 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) in hpsa_ioctl()
6472 rc = hpsa_passthru_ioctl(h, argp); in hpsa_ioctl()
6473 atomic_inc(&h->passthru_cmds_avail); in hpsa_ioctl()
6476 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) in hpsa_ioctl()
6478 rc = hpsa_big_passthru_ioctl(h, argp); in hpsa_ioctl()
6479 atomic_inc(&h->passthru_cmds_avail); in hpsa_ioctl()
6486 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, in hpsa_send_host_reset() argument
6491 c = cmd_alloc(h); in hpsa_send_host_reset()
6494 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, in hpsa_send_host_reset()
6498 enqueue_cmd_and_start_io(h, c); in hpsa_send_host_reset()
6506 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, in fill_cmd() argument
6649 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd); in fill_cmd()
6687 dev_dbg(&h->pdev->dev, in fill_cmd()
6707 dev_warn(&h->pdev->dev, "unknown message type %d\n", in fill_cmd()
6712 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); in fill_cmd()
6729 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) in fill_cmd()
6747 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) in get_next_completion() argument
6749 return h->access.command_completed(h, q); in get_next_completion()
6752 static inline bool interrupt_pending(struct ctlr_info *h) in interrupt_pending() argument
6754 return h->access.intr_pending(h); in interrupt_pending()
6757 static inline long interrupt_not_for_us(struct ctlr_info *h) in interrupt_not_for_us() argument
6759 return (h->access.intr_pending(h) == 0) || in interrupt_not_for_us()
6760 (h->interrupts_enabled == 0); in interrupt_not_for_us()
6763 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, in bad_tag() argument
6766 if (unlikely(tag_index >= h->nr_cmds)) { in bad_tag()
6767 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); in bad_tag()
6775 dial_up_lockup_detection_on_fw_flash_complete(c->h, c); in finish_cmd()
6784 static inline void process_indexed_cmd(struct ctlr_info *h, in process_indexed_cmd() argument
6791 if (!bad_tag(h, tag_index, raw_tag)) { in process_indexed_cmd()
6792 c = h->cmd_pool + tag_index; in process_indexed_cmd()
6802 static int ignore_bogus_interrupt(struct ctlr_info *h) in ignore_bogus_interrupt() argument
6807 if (likely(h->interrupts_enabled)) in ignore_bogus_interrupt()
6810 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " in ignore_bogus_interrupt()
6828 struct ctlr_info *h = queue_to_hba(queue); in hpsa_intx_discard_completions() local
6832 if (ignore_bogus_interrupt(h)) in hpsa_intx_discard_completions()
6835 if (interrupt_not_for_us(h)) in hpsa_intx_discard_completions()
6837 h->last_intr_timestamp = get_jiffies_64(); in hpsa_intx_discard_completions()
6838 while (interrupt_pending(h)) { in hpsa_intx_discard_completions()
6839 raw_tag = get_next_completion(h, q); in hpsa_intx_discard_completions()
6841 raw_tag = next_command(h, q); in hpsa_intx_discard_completions()
6848 struct ctlr_info *h = queue_to_hba(queue); in hpsa_msix_discard_completions() local
6852 if (ignore_bogus_interrupt(h)) in hpsa_msix_discard_completions()
6855 h->last_intr_timestamp = get_jiffies_64(); in hpsa_msix_discard_completions()
6856 raw_tag = get_next_completion(h, q); in hpsa_msix_discard_completions()
6858 raw_tag = next_command(h, q); in hpsa_msix_discard_completions()
6864 struct ctlr_info *h = queue_to_hba((u8 *) queue); in do_hpsa_intr_intx() local
6868 if (interrupt_not_for_us(h)) in do_hpsa_intr_intx()
6870 h->last_intr_timestamp = get_jiffies_64(); in do_hpsa_intr_intx()
6871 while (interrupt_pending(h)) { in do_hpsa_intr_intx()
6872 raw_tag = get_next_completion(h, q); in do_hpsa_intr_intx()
6874 process_indexed_cmd(h, raw_tag); in do_hpsa_intr_intx()
6875 raw_tag = next_command(h, q); in do_hpsa_intr_intx()
6883 struct ctlr_info *h = queue_to_hba(queue); in do_hpsa_intr_msi() local
6887 h->last_intr_timestamp = get_jiffies_64(); in do_hpsa_intr_msi()
6888 raw_tag = get_next_completion(h, q); in do_hpsa_intr_msi()
6890 process_indexed_cmd(h, raw_tag); in do_hpsa_intr_msi()
6891 raw_tag = next_command(h, q); in do_hpsa_intr_msi()
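
All four interrupt handlers above share one shape: stamp last_intr_timestamp for the lockup detector, then drain the reply queue, looking up each completed command by its tag index until no completion is pending. A userspace analogue that drains a small completion ring; the ring layout here is invented for illustration:

#include <stdio.h>

#define RING_SIZE 8
#define EMPTY     (~0u)

static unsigned ring[RING_SIZE];
static unsigned head;	/* consumer index */

/* Pop the next completed tag, or EMPTY when the queue is drained. */
static unsigned next_completion(void)
{
	unsigned tag = ring[head % RING_SIZE];
	if (tag == EMPTY)
		return EMPTY;
	ring[head % RING_SIZE] = EMPTY;
	head++;
	return tag;
}

static void irq_handler(void)
{
	for (unsigned tag = next_completion(); tag != EMPTY;
	     tag = next_completion())
		printf("completing command with tag %u\n", tag);
}

int main(void)
{
	for (int i = 0; i < RING_SIZE; i++)
		ring[i] = EMPTY;
	ring[0] = 7; ring[1] = 3; ring[2] = 42;	/* pretend hardware posted these */
	irq_handler();
	return 0;
}
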
7294 static void hpsa_disable_interrupt_mode(struct ctlr_info *h) in hpsa_disable_interrupt_mode() argument
7296 if (h->msix_vector) { in hpsa_disable_interrupt_mode()
7297 if (h->pdev->msix_enabled) in hpsa_disable_interrupt_mode()
7298 pci_disable_msix(h->pdev); in hpsa_disable_interrupt_mode()
7299 h->msix_vector = 0; in hpsa_disable_interrupt_mode()
7300 } else if (h->msi_vector) { in hpsa_disable_interrupt_mode()
7301 if (h->pdev->msi_enabled) in hpsa_disable_interrupt_mode()
7302 pci_disable_msi(h->pdev); in hpsa_disable_interrupt_mode()
7303 h->msi_vector = 0; in hpsa_disable_interrupt_mode()
7310 static void hpsa_interrupt_mode(struct ctlr_info *h) in hpsa_interrupt_mode() argument
7322 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || in hpsa_interrupt_mode()
7323 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) in hpsa_interrupt_mode()
7325 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { in hpsa_interrupt_mode()
7326 dev_info(&h->pdev->dev, "MSI-X capable controller\n"); in hpsa_interrupt_mode()
7327 h->msix_vector = MAX_REPLY_QUEUES; in hpsa_interrupt_mode()
7328 if (h->msix_vector > num_online_cpus()) in hpsa_interrupt_mode()
7329 h->msix_vector = num_online_cpus(); in hpsa_interrupt_mode()
7330 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries, in hpsa_interrupt_mode()
7331 1, h->msix_vector); in hpsa_interrupt_mode()
7333 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); in hpsa_interrupt_mode()
7334 h->msix_vector = 0; in hpsa_interrupt_mode()
7336 } else if (err < h->msix_vector) { in hpsa_interrupt_mode()
7337 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " in hpsa_interrupt_mode()
7340 h->msix_vector = err; in hpsa_interrupt_mode()
7341 for (i = 0; i < h->msix_vector; i++) in hpsa_interrupt_mode()
7342 h->intr[i] = hpsa_msix_entries[i].vector; in hpsa_interrupt_mode()
7346 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { in hpsa_interrupt_mode()
7347 dev_info(&h->pdev->dev, "MSI capable controller\n"); in hpsa_interrupt_mode()
7348 if (!pci_enable_msi(h->pdev)) in hpsa_interrupt_mode()
7349 h->msi_vector = 1; in hpsa_interrupt_mode()
7351 dev_warn(&h->pdev->dev, "MSI init failed\n"); in hpsa_interrupt_mode()
7356 h->intr[h->intr_mode] = h->pdev->irq; in hpsa_interrupt_mode()
7440 static void hpsa_free_cfgtables(struct ctlr_info *h) in hpsa_free_cfgtables() argument
7442 if (h->transtable) { in hpsa_free_cfgtables()
7443 iounmap(h->transtable); in hpsa_free_cfgtables()
7444 h->transtable = NULL; in hpsa_free_cfgtables()
7446 if (h->cfgtable) { in hpsa_free_cfgtables()
7447 iounmap(h->cfgtable); in hpsa_free_cfgtables()
7448 h->cfgtable = NULL; in hpsa_free_cfgtables()
7455 static int hpsa_find_cfgtables(struct ctlr_info *h) in hpsa_find_cfgtables() argument
7463 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, in hpsa_find_cfgtables()
7467 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, in hpsa_find_cfgtables()
7468 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); in hpsa_find_cfgtables()
7469 if (!h->cfgtable) { in hpsa_find_cfgtables()
7470 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); in hpsa_find_cfgtables()
7473 rc = write_driver_ver_to_cfgtable(h->cfgtable); in hpsa_find_cfgtables()
7477 trans_offset = readl(&h->cfgtable->TransMethodOffset); in hpsa_find_cfgtables()
7478 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, in hpsa_find_cfgtables()
7480 sizeof(*h->transtable)); in hpsa_find_cfgtables()
7481 if (!h->transtable) { in hpsa_find_cfgtables()
7482 dev_err(&h->pdev->dev, "Failed mapping transfer table\n"); in hpsa_find_cfgtables()
7483 hpsa_free_cfgtables(h); in hpsa_find_cfgtables()
7489 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) in hpsa_get_max_perf_mode_cmds() argument
7494 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands); in hpsa_get_max_perf_mode_cmds()
7497 if (reset_devices && h->max_commands > 32) in hpsa_get_max_perf_mode_cmds()
7498 h->max_commands = 32; in hpsa_get_max_perf_mode_cmds()
7500 if (h->max_commands < MIN_MAX_COMMANDS) { in hpsa_get_max_perf_mode_cmds()
7501 dev_warn(&h->pdev->dev, in hpsa_get_max_perf_mode_cmds()
7503 h->max_commands, in hpsa_get_max_perf_mode_cmds()
7505 h->max_commands = MIN_MAX_COMMANDS; in hpsa_get_max_perf_mode_cmds()
7513 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) in hpsa_supports_chained_sg_blocks() argument
7515 return h->maxsgentries > 512; in hpsa_supports_chained_sg_blocks()
7522 static void hpsa_find_board_params(struct ctlr_info *h) in hpsa_find_board_params() argument
7524 hpsa_get_max_perf_mode_cmds(h); in hpsa_find_board_params()
7525 h->nr_cmds = h->max_commands; in hpsa_find_board_params()
7526 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); in hpsa_find_board_params()
7527 h->fw_support = readl(&(h->cfgtable->misc_fw_support)); in hpsa_find_board_params()
7528 if (hpsa_supports_chained_sg_blocks(h)) { in hpsa_find_board_params()
7530 h->max_cmd_sg_entries = 32; in hpsa_find_board_params()
7531 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; in hpsa_find_board_params()
7532 h->maxsgentries--; /* save one for chain pointer */ in hpsa_find_board_params()
7539 h->max_cmd_sg_entries = 31; in hpsa_find_board_params()
7540 h->maxsgentries = 31; /* default to traditional values */ in hpsa_find_board_params()
7541 h->chainsize = 0; in hpsa_find_board_params()
7545 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); in hpsa_find_board_params()
7546 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) in hpsa_find_board_params()
7547 dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); in hpsa_find_board_params()
7548 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) in hpsa_find_board_params()
7549 dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); in hpsa_find_board_params()
7550 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)) in hpsa_find_board_params()
7551 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n"); in hpsa_find_board_params()
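
hpsa_find_board_params derives the scatter-gather geometry: controllers reporting more than 512 SG entries support chained SG blocks, so the driver embeds 32 entries per command, spills the remainder into a chain block, and reserves one embedded entry for the chain pointer; older controllers fall back to 31 embedded entries with no chaining. The arithmetic, reproduced as a standalone program:

#include <stdio.h>

static void board_params(unsigned maxsg)
{
	unsigned embedded, chainsize;

	if (maxsg > 512) {	/* chained SG blocks supported */
		embedded = 32;
		chainsize = maxsg - embedded;
		maxsg--;	/* save one entry for the chain pointer */
	} else {		/* traditional fixed layout */
		embedded = 31;
		maxsg = 31;
		chainsize = 0;
	}
	printf("maxsg=%u embedded=%u chainsize=%u\n",
	       maxsg, embedded, chainsize);
}

int main(void)
{
	board_params(1024);	/* chaining-capable controller */
	board_params(64);	/* older controller */
	return 0;
}
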
7554 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) in hpsa_CISS_signature_present() argument
7556 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { in hpsa_CISS_signature_present()
7557 dev_err(&h->pdev->dev, "not a valid CISS config table\n"); in hpsa_CISS_signature_present()
7563 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) in hpsa_set_driver_support_bits() argument
7567 driver_support = readl(&(h->cfgtable->driver_support)); in hpsa_set_driver_support_bits()
7573 writel(driver_support, &(h->cfgtable->driver_support)); in hpsa_set_driver_support_bits()
7579 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) in hpsa_p600_dma_prefetch_quirk() argument
7583 if (h->board_id != 0x3225103C) in hpsa_p600_dma_prefetch_quirk()
7585 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); in hpsa_p600_dma_prefetch_quirk()
7587 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); in hpsa_p600_dma_prefetch_quirk()
7590 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) in hpsa_wait_for_clear_event_notify_ack() argument
7597 spin_lock_irqsave(&h->lock, flags); in hpsa_wait_for_clear_event_notify_ack()
7598 doorbell_value = readl(h->vaddr + SA5_DOORBELL); in hpsa_wait_for_clear_event_notify_ack()
7599 spin_unlock_irqrestore(&h->lock, flags); in hpsa_wait_for_clear_event_notify_ack()
7610 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) in hpsa_wait_for_mode_change_ack() argument
7621 if (h->remove_in_progress) in hpsa_wait_for_mode_change_ack()
7623 spin_lock_irqsave(&h->lock, flags); in hpsa_wait_for_mode_change_ack()
7624 doorbell_value = readl(h->vaddr + SA5_DOORBELL); in hpsa_wait_for_mode_change_ack()
7625 spin_unlock_irqrestore(&h->lock, flags); in hpsa_wait_for_mode_change_ack()
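
Both ack-wait helpers poll the SA5 doorbell register under h->lock, sleeping between reads, until the controller clears the request bit or the retry budget is exhausted (the mode-change variant also bails out early when a remove is in progress). A minimal polling sketch; the simulated ack at iteration 3 merely stands in for the hardware:

#include <stdio.h>
#include <unistd.h>

#define MAX_POLLS 10
#define CHANGE_REQ 0x1u

static volatile unsigned doorbell = CHANGE_REQ;	/* stand-in for the MMIO register */

static int wait_for_ack(void)
{
	for (int i = 0; i < MAX_POLLS; i++) {
		if (!(doorbell & CHANGE_REQ))
			return 0;	/* controller acknowledged */
		usleep(10000);		/* sleep between polls */
		if (i == 3)
			doorbell = 0;	/* pretend the controller acks here */
	}
	return -1;			/* gave up waiting */
}

int main(void)
{
	printf("ack wait: %s\n", wait_for_ack() == 0 ? "ok" : "timed out");
	return 0;
}
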
7637 static int hpsa_enter_simple_mode(struct ctlr_info *h) in hpsa_enter_simple_mode() argument
7641 trans_support = readl(&(h->cfgtable->TransportSupport)); in hpsa_enter_simple_mode()
7645 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); in hpsa_enter_simple_mode()
7648 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); in hpsa_enter_simple_mode()
7649 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); in hpsa_enter_simple_mode()
7650 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); in hpsa_enter_simple_mode()
7651 if (hpsa_wait_for_mode_change_ack(h)) in hpsa_enter_simple_mode()
7653 print_cfg_table(&h->pdev->dev, h->cfgtable); in hpsa_enter_simple_mode()
7654 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) in hpsa_enter_simple_mode()
7656 h->transMethod = CFGTBL_Trans_Simple; in hpsa_enter_simple_mode()
7659 dev_err(&h->pdev->dev, "failed to enter simple mode\n"); in hpsa_enter_simple_mode()
7664 static void hpsa_free_pci_init(struct ctlr_info *h) in hpsa_free_pci_init() argument
7666 hpsa_free_cfgtables(h); /* pci_init 4 */ in hpsa_free_pci_init()
7667 iounmap(h->vaddr); /* pci_init 3 */ in hpsa_free_pci_init()
7668 h->vaddr = NULL; in hpsa_free_pci_init()
7669 hpsa_disable_interrupt_mode(h); /* pci_init 2 */ in hpsa_free_pci_init()
7674 pci_disable_device(h->pdev); /* pci_init 1 */ in hpsa_free_pci_init()
7675 pci_release_regions(h->pdev); /* pci_init 2 */ in hpsa_free_pci_init()
7679 static int hpsa_pci_init(struct ctlr_info *h) in hpsa_pci_init() argument
7683 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); in hpsa_pci_init()
7686 h->product_name = products[prod_index].product_name; in hpsa_pci_init()
7687 h->access = *(products[prod_index].access); in hpsa_pci_init()
7689 h->needs_abort_tags_swizzled = in hpsa_pci_init()
7690 ctlr_needs_abort_tags_swizzled(h->board_id); in hpsa_pci_init()
7692 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | in hpsa_pci_init()
7695 err = pci_enable_device(h->pdev); in hpsa_pci_init()
7697 dev_err(&h->pdev->dev, "failed to enable PCI device\n"); in hpsa_pci_init()
7698 pci_disable_device(h->pdev); in hpsa_pci_init()
7702 err = pci_request_regions(h->pdev, HPSA); in hpsa_pci_init()
7704 dev_err(&h->pdev->dev, in hpsa_pci_init()
7706 pci_disable_device(h->pdev); in hpsa_pci_init()
7710 pci_set_master(h->pdev); in hpsa_pci_init()
7712 hpsa_interrupt_mode(h); in hpsa_pci_init()
7713 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); in hpsa_pci_init()
7716 h->vaddr = remap_pci_mem(h->paddr, 0x250); in hpsa_pci_init()
7717 if (!h->vaddr) { in hpsa_pci_init()
7718 dev_err(&h->pdev->dev, "failed to remap PCI mem\n"); in hpsa_pci_init()
7722 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); in hpsa_pci_init()
7725 err = hpsa_find_cfgtables(h); in hpsa_pci_init()
7728 hpsa_find_board_params(h); in hpsa_pci_init()
7730 if (!hpsa_CISS_signature_present(h)) { in hpsa_pci_init()
7734 hpsa_set_driver_support_bits(h); in hpsa_pci_init()
7735 hpsa_p600_dma_prefetch_quirk(h); in hpsa_pci_init()
7736 err = hpsa_enter_simple_mode(h); in hpsa_pci_init()
7742 hpsa_free_cfgtables(h); in hpsa_pci_init()
7744 iounmap(h->vaddr); in hpsa_pci_init()
7745 h->vaddr = NULL; in hpsa_pci_init()
7747 hpsa_disable_interrupt_mode(h); in hpsa_pci_init()
7752 pci_disable_device(h->pdev); in hpsa_pci_init()
7753 pci_release_regions(h->pdev); in hpsa_pci_init()
7757 static void hpsa_hba_inquiry(struct ctlr_info *h) in hpsa_hba_inquiry() argument
7762 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); in hpsa_hba_inquiry()
7763 if (!h->hba_inquiry_data) in hpsa_hba_inquiry()
7765 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, in hpsa_hba_inquiry()
7766 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); in hpsa_hba_inquiry()
7768 kfree(h->hba_inquiry_data); in hpsa_hba_inquiry()
7769 h->hba_inquiry_data = NULL; in hpsa_hba_inquiry()
7835 static void hpsa_free_cmd_pool(struct ctlr_info *h) in hpsa_free_cmd_pool() argument
7837 kfree(h->cmd_pool_bits); in hpsa_free_cmd_pool()
7838 h->cmd_pool_bits = NULL; in hpsa_free_cmd_pool()
7839 if (h->cmd_pool) { in hpsa_free_cmd_pool()
7840 pci_free_consistent(h->pdev, in hpsa_free_cmd_pool()
7841 h->nr_cmds * sizeof(struct CommandList), in hpsa_free_cmd_pool()
7842 h->cmd_pool, in hpsa_free_cmd_pool()
7843 h->cmd_pool_dhandle); in hpsa_free_cmd_pool()
7844 h->cmd_pool = NULL; in hpsa_free_cmd_pool()
7845 h->cmd_pool_dhandle = 0; in hpsa_free_cmd_pool()
7847 if (h->errinfo_pool) { in hpsa_free_cmd_pool()
7848 pci_free_consistent(h->pdev, in hpsa_free_cmd_pool()
7849 h->nr_cmds * sizeof(struct ErrorInfo), in hpsa_free_cmd_pool()
7850 h->errinfo_pool, in hpsa_free_cmd_pool()
7851 h->errinfo_pool_dhandle); in hpsa_free_cmd_pool()
7852 h->errinfo_pool = NULL; in hpsa_free_cmd_pool()
7853 h->errinfo_pool_dhandle = 0; in hpsa_free_cmd_pool()
7857 static int hpsa_alloc_cmd_pool(struct ctlr_info *h) in hpsa_alloc_cmd_pool() argument
7859 h->cmd_pool_bits = kzalloc( in hpsa_alloc_cmd_pool()
7860 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * in hpsa_alloc_cmd_pool()
7862 h->cmd_pool = pci_alloc_consistent(h->pdev, in hpsa_alloc_cmd_pool()
7863 h->nr_cmds * sizeof(*h->cmd_pool), in hpsa_alloc_cmd_pool()
7864 &(h->cmd_pool_dhandle)); in hpsa_alloc_cmd_pool()
7865 h->errinfo_pool = pci_alloc_consistent(h->pdev, in hpsa_alloc_cmd_pool()
7866 h->nr_cmds * sizeof(*h->errinfo_pool), in hpsa_alloc_cmd_pool()
7867 &(h->errinfo_pool_dhandle)); in hpsa_alloc_cmd_pool()
7868 if ((h->cmd_pool_bits == NULL) in hpsa_alloc_cmd_pool()
7869 || (h->cmd_pool == NULL) in hpsa_alloc_cmd_pool()
7870 || (h->errinfo_pool == NULL)) { in hpsa_alloc_cmd_pool()
7871 dev_err(&h->pdev->dev, "out of memory in %s", __func__); in hpsa_alloc_cmd_pool()
7874 hpsa_preinitialize_commands(h); in hpsa_alloc_cmd_pool()
7877 hpsa_free_cmd_pool(h); in hpsa_alloc_cmd_pool()
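
hpsa_alloc_cmd_pool is an all-or-nothing constructor: it allocates the bitmap and the two DMA-coherent arrays, and if any allocation fails it funnels through hpsa_free_cmd_pool, which tolerates NULL members and resets them. The same idiom in plain userspace C, with calloc standing in for pci_alloc_consistent and invented element sizes:

#include <stdio.h>
#include <stdlib.h>

struct pool {
	unsigned long *bits;
	void *cmds;
	void *errinfo;
};

static void pool_free(struct pool *p)
{
	free(p->bits);    p->bits = NULL;	/* free() tolerates NULL */
	free(p->cmds);    p->cmds = NULL;
	free(p->errinfo); p->errinfo = NULL;
}

static int pool_alloc(struct pool *p, size_t nr_cmds)
{
	p->bits    = calloc((nr_cmds + 63) / 64, sizeof(*p->bits));
	p->cmds    = calloc(nr_cmds, 64);	/* per-command descriptor */
	p->errinfo = calloc(nr_cmds, 32);	/* per-command error block */
	if (!p->bits || !p->cmds || !p->errinfo) {
		pool_free(p);			/* all-or-nothing */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct pool p = { 0 };
	printf("alloc: %s\n", pool_alloc(&p, 256) == 0 ? "ok" : "failed");
	pool_free(&p);
	return 0;
}
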
7881 static void hpsa_irq_affinity_hints(struct ctlr_info *h) in hpsa_irq_affinity_hints() argument
7886 for (i = 0; i < h->msix_vector; i++) { in hpsa_irq_affinity_hints()
7887 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); in hpsa_irq_affinity_hints()
7893 static void hpsa_free_irqs(struct ctlr_info *h) in hpsa_free_irqs() argument
7897 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { in hpsa_free_irqs()
7899 i = h->intr_mode; in hpsa_free_irqs()
7900 irq_set_affinity_hint(h->intr[i], NULL); in hpsa_free_irqs()
7901 free_irq(h->intr[i], &h->q[i]); in hpsa_free_irqs()
7902 h->q[i] = 0; in hpsa_free_irqs()
7906 for (i = 0; i < h->msix_vector; i++) { in hpsa_free_irqs()
7907 irq_set_affinity_hint(h->intr[i], NULL); in hpsa_free_irqs()
7908 free_irq(h->intr[i], &h->q[i]); in hpsa_free_irqs()
7909 h->q[i] = 0; in hpsa_free_irqs()
7912 h->q[i] = 0; in hpsa_free_irqs()
7916 static int hpsa_request_irqs(struct ctlr_info *h, in hpsa_request_irqs() argument
7927 h->q[i] = (u8) i; in hpsa_request_irqs()
7929 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { in hpsa_request_irqs()
7931 for (i = 0; i < h->msix_vector; i++) { in hpsa_request_irqs()
7932 sprintf(h->intrname[i], "%s-msix%d", h->devname, i); in hpsa_request_irqs()
7933 rc = request_irq(h->intr[i], msixhandler, in hpsa_request_irqs()
7934 0, h->intrname[i], in hpsa_request_irqs()
7935 &h->q[i]); in hpsa_request_irqs()
7939 dev_err(&h->pdev->dev, in hpsa_request_irqs()
7941 h->intr[i], h->devname); in hpsa_request_irqs()
7943 free_irq(h->intr[j], &h->q[j]); in hpsa_request_irqs()
7944 h->q[j] = 0; in hpsa_request_irqs()
7947 h->q[j] = 0; in hpsa_request_irqs()
7951 hpsa_irq_affinity_hints(h); in hpsa_request_irqs()
7954 if (h->msix_vector > 0 || h->msi_vector) { in hpsa_request_irqs()
7955 if (h->msix_vector) in hpsa_request_irqs()
7956 sprintf(h->intrname[h->intr_mode], in hpsa_request_irqs()
7957 "%s-msix", h->devname); in hpsa_request_irqs()
7959 sprintf(h->intrname[h->intr_mode], in hpsa_request_irqs()
7960 "%s-msi", h->devname); in hpsa_request_irqs()
7961 rc = request_irq(h->intr[h->intr_mode], in hpsa_request_irqs()
7963 h->intrname[h->intr_mode], in hpsa_request_irqs()
7964 &h->q[h->intr_mode]); in hpsa_request_irqs()
7966 sprintf(h->intrname[h->intr_mode], in hpsa_request_irqs()
7967 "%s-intx", h->devname); in hpsa_request_irqs()
7968 rc = request_irq(h->intr[h->intr_mode], in hpsa_request_irqs()
7970 h->intrname[h->intr_mode], in hpsa_request_irqs()
7971 &h->q[h->intr_mode]); in hpsa_request_irqs()
7973 irq_set_affinity_hint(h->intr[h->intr_mode], NULL); in hpsa_request_irqs()
7976 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", in hpsa_request_irqs()
7977 h->intr[h->intr_mode], h->devname); in hpsa_request_irqs()
7978 hpsa_free_irqs(h); in hpsa_request_irqs()
7984 static int hpsa_kdump_soft_reset(struct ctlr_info *h) in hpsa_kdump_soft_reset() argument
7987 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER); in hpsa_kdump_soft_reset()
7989 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); in hpsa_kdump_soft_reset()
7990 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); in hpsa_kdump_soft_reset()
7992 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); in hpsa_kdump_soft_reset()
7996 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); in hpsa_kdump_soft_reset()
7997 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); in hpsa_kdump_soft_reset()
7999 dev_warn(&h->pdev->dev, "Board failed to become ready " in hpsa_kdump_soft_reset()
8007 static void hpsa_free_reply_queues(struct ctlr_info *h) in hpsa_free_reply_queues() argument
8011 for (i = 0; i < h->nreply_queues; i++) { in hpsa_free_reply_queues()
8012 if (!h->reply_queue[i].head) in hpsa_free_reply_queues()
8014 pci_free_consistent(h->pdev, in hpsa_free_reply_queues()
8015 h->reply_queue_size, in hpsa_free_reply_queues()
8016 h->reply_queue[i].head, in hpsa_free_reply_queues()
8017 h->reply_queue[i].busaddr); in hpsa_free_reply_queues()
8018 h->reply_queue[i].head = NULL; in hpsa_free_reply_queues()
8019 h->reply_queue[i].busaddr = 0; in hpsa_free_reply_queues()
8021 h->reply_queue_size = 0; in hpsa_free_reply_queues()
8024 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) in hpsa_undo_allocations_after_kdump_soft_reset() argument
8026 hpsa_free_performant_mode(h); /* init_one 7 */ in hpsa_undo_allocations_after_kdump_soft_reset()
8027 hpsa_free_sg_chain_blocks(h); /* init_one 6 */ in hpsa_undo_allocations_after_kdump_soft_reset()
8028 hpsa_free_cmd_pool(h); /* init_one 5 */ in hpsa_undo_allocations_after_kdump_soft_reset()
8029 hpsa_free_irqs(h); /* init_one 4 */ in hpsa_undo_allocations_after_kdump_soft_reset()
8030 scsi_host_put(h->scsi_host); /* init_one 3 */ in hpsa_undo_allocations_after_kdump_soft_reset()
8031 h->scsi_host = NULL; /* init_one 3 */ in hpsa_undo_allocations_after_kdump_soft_reset()
8032 hpsa_free_pci_init(h); /* init_one 2_5 */ in hpsa_undo_allocations_after_kdump_soft_reset()
8033 free_percpu(h->lockup_detected); /* init_one 2 */ in hpsa_undo_allocations_after_kdump_soft_reset()
8034 h->lockup_detected = NULL; /* init_one 2 */ in hpsa_undo_allocations_after_kdump_soft_reset()
8035 if (h->resubmit_wq) { in hpsa_undo_allocations_after_kdump_soft_reset()
8036 destroy_workqueue(h->resubmit_wq); /* init_one 1 */ in hpsa_undo_allocations_after_kdump_soft_reset()
8037 h->resubmit_wq = NULL; in hpsa_undo_allocations_after_kdump_soft_reset()
8039 if (h->rescan_ctlr_wq) { in hpsa_undo_allocations_after_kdump_soft_reset()
8040 destroy_workqueue(h->rescan_ctlr_wq); in hpsa_undo_allocations_after_kdump_soft_reset()
8041 h->rescan_ctlr_wq = NULL; in hpsa_undo_allocations_after_kdump_soft_reset()
8043 kfree(h); /* init_one 1 */ in hpsa_undo_allocations_after_kdump_soft_reset()
8047 static void fail_all_outstanding_cmds(struct ctlr_info *h) in fail_all_outstanding_cmds() argument
8053 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ in fail_all_outstanding_cmds()
8054 for (i = 0; i < h->nr_cmds; i++) { in fail_all_outstanding_cmds()
8055 c = h->cmd_pool + i; in fail_all_outstanding_cmds()
8060 atomic_dec(&h->commands_outstanding); in fail_all_outstanding_cmds()
8063 cmd_free(h, c); in fail_all_outstanding_cmds()
8065 dev_warn(&h->pdev->dev, in fail_all_outstanding_cmds()
8069 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) in set_lockup_detected_for_all_cpus() argument
8075 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); in set_lockup_detected_for_all_cpus()
8081 static void controller_lockup_detected(struct ctlr_info *h) in controller_lockup_detected() argument
8086 h->access.set_intr_mask(h, HPSA_INTR_OFF); in controller_lockup_detected()
8087 spin_lock_irqsave(&h->lock, flags); in controller_lockup_detected()
8088 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); in controller_lockup_detected()
8091 dev_warn(&h->pdev->dev, in controller_lockup_detected()
8093 h->heartbeat_sample_interval / HZ); in controller_lockup_detected()
8096 set_lockup_detected_for_all_cpus(h, lockup_detected); in controller_lockup_detected()
8097 spin_unlock_irqrestore(&h->lock, flags); in controller_lockup_detected()
8098 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n", in controller_lockup_detected()
8099 lockup_detected, h->heartbeat_sample_interval / HZ); in controller_lockup_detected()
8100 pci_disable_device(h->pdev); in controller_lockup_detected()
8101 fail_all_outstanding_cmds(h); in controller_lockup_detected()
8104 static int detect_controller_lockup(struct ctlr_info *h) in detect_controller_lockup() argument
8112 if (time_after64(h->last_intr_timestamp + in detect_controller_lockup()
8113 (h->heartbeat_sample_interval), now)) in detect_controller_lockup()
8121 if (time_after64(h->last_heartbeat_timestamp + in detect_controller_lockup()
8122 (h->heartbeat_sample_interval), now)) in detect_controller_lockup()
8126 spin_lock_irqsave(&h->lock, flags); in detect_controller_lockup()
8127 heartbeat = readl(&h->cfgtable->HeartBeat); in detect_controller_lockup()
8128 spin_unlock_irqrestore(&h->lock, flags); in detect_controller_lockup()
8129 if (h->last_heartbeat == heartbeat) { in detect_controller_lockup()
8130 controller_lockup_detected(h); in detect_controller_lockup()
8135 h->last_heartbeat = heartbeat; in detect_controller_lockup()
8136 h->last_heartbeat_timestamp = now; in detect_controller_lockup()
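
detect_controller_lockup declares a lockup only when a full sample interval passes with neither an interrupt nor a heartbeat change: it reads the firmware heartbeat counter under h->lock and compares it with the previous sample, recording the new value and timestamp when progress is seen. A self-contained watchdog sketch of the same logic (wall-clock seconds replace jiffies):

#include <stdio.h>
#include <time.h>

#define SAMPLE_INTERVAL 2	/* seconds */

struct watchdog {
	unsigned last_heartbeat;
	time_t last_sample;
};

/* Returns 1 if the counter failed to advance across a full interval. */
static int detect_lockup(struct watchdog *w, unsigned heartbeat)
{
	time_t now = time(NULL);

	if (now - w->last_sample < SAMPLE_INTERVAL)
		return 0;		/* sampled too recently */
	if (heartbeat == w->last_heartbeat)
		return 1;		/* no progress: lockup */
	w->last_heartbeat = heartbeat;	/* progress: resample later */
	w->last_sample = now;
	return 0;
}

int main(void)
{
	struct watchdog w = { .last_heartbeat = 41, .last_sample = 0 };
	printf("advancing counter: %d\n", detect_lockup(&w, 42));	/* 0 */
	w.last_sample = 0;		/* force the interval to expire */
	printf("stuck counter:     %d\n", detect_lockup(&w, 42));	/* 1 */
	return 0;
}
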
8140 static void hpsa_ack_ctlr_events(struct ctlr_info *h) in hpsa_ack_ctlr_events() argument
8145 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) in hpsa_ack_ctlr_events()
8149 if ((h->transMethod & (CFGTBL_Trans_io_accel1 in hpsa_ack_ctlr_events()
8151 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || in hpsa_ack_ctlr_events()
8152 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { in hpsa_ack_ctlr_events()
8154 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) in hpsa_ack_ctlr_events()
8156 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) in hpsa_ack_ctlr_events()
8159 scsi_block_requests(h->scsi_host); in hpsa_ack_ctlr_events()
8160 for (i = 0; i < h->ndevices; i++) in hpsa_ack_ctlr_events()
8161 h->dev[i]->offload_enabled = 0; in hpsa_ack_ctlr_events()
8162 hpsa_drain_accel_commands(h); in hpsa_ack_ctlr_events()
8164 dev_warn(&h->pdev->dev, in hpsa_ack_ctlr_events()
8166 h->events, event_type); in hpsa_ack_ctlr_events()
8167 writel(h->events, &(h->cfgtable->clear_event_notify)); in hpsa_ack_ctlr_events()
8169 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); in hpsa_ack_ctlr_events()
8171 hpsa_wait_for_clear_event_notify_ack(h); in hpsa_ack_ctlr_events()
8172 scsi_unblock_requests(h->scsi_host); in hpsa_ack_ctlr_events()
8175 writel(h->events, &(h->cfgtable->clear_event_notify)); in hpsa_ack_ctlr_events()
8176 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); in hpsa_ack_ctlr_events()
8177 hpsa_wait_for_clear_event_notify_ack(h); in hpsa_ack_ctlr_events()
8179 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); in hpsa_ack_ctlr_events()
8180 hpsa_wait_for_mode_change_ack(h); in hpsa_ack_ctlr_events()
8191 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) in hpsa_ctlr_needs_rescan() argument
8193 if (h->drv_req_rescan) { in hpsa_ctlr_needs_rescan()
8194 h->drv_req_rescan = 0; in hpsa_ctlr_needs_rescan()
8198 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) in hpsa_ctlr_needs_rescan()
8201 h->events = readl(&(h->cfgtable->event_notify)); in hpsa_ctlr_needs_rescan()
8202 return h->events & RESCAN_REQUIRED_EVENT_BITS; in hpsa_ctlr_needs_rescan()
8208 static int hpsa_offline_devices_ready(struct ctlr_info *h) in hpsa_offline_devices_ready() argument
8214 spin_lock_irqsave(&h->offline_device_lock, flags); in hpsa_offline_devices_ready()
8215 list_for_each_safe(this, tmp, &h->offline_device_list) { in hpsa_offline_devices_ready()
8218 spin_unlock_irqrestore(&h->offline_device_lock, flags); in hpsa_offline_devices_ready()
8219 if (!hpsa_volume_offline(h, d->scsi3addr)) { in hpsa_offline_devices_ready()
8220 spin_lock_irqsave(&h->offline_device_lock, flags); in hpsa_offline_devices_ready()
8222 spin_unlock_irqrestore(&h->offline_device_lock, flags); in hpsa_offline_devices_ready()
8225 spin_lock_irqsave(&h->offline_device_lock, flags); in hpsa_offline_devices_ready()
8227 spin_unlock_irqrestore(&h->offline_device_lock, flags); in hpsa_offline_devices_ready()
8231 static int hpsa_luns_changed(struct ctlr_info *h) in hpsa_luns_changed() argument
8240 if (!h->lastlogicals) in hpsa_luns_changed()
8245 dev_warn(&h->pdev->dev, in hpsa_luns_changed()
8249 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) { in hpsa_luns_changed()
8250 dev_warn(&h->pdev->dev, in hpsa_luns_changed()
8254 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) { in hpsa_luns_changed()
8255 dev_info(&h->pdev->dev, in hpsa_luns_changed()
8257 memcpy(h->lastlogicals, logdev, sizeof(*logdev)); in hpsa_luns_changed()
8269 struct ctlr_info *h = container_of(to_delayed_work(work), in hpsa_rescan_ctlr_worker() local
8273 if (h->remove_in_progress) in hpsa_rescan_ctlr_worker()
8276 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { in hpsa_rescan_ctlr_worker()
8277 scsi_host_get(h->scsi_host); in hpsa_rescan_ctlr_worker()
8278 hpsa_ack_ctlr_events(h); in hpsa_rescan_ctlr_worker()
8279 hpsa_scan_start(h->scsi_host); in hpsa_rescan_ctlr_worker()
8280 scsi_host_put(h->scsi_host); in hpsa_rescan_ctlr_worker()
8281 } else if (h->discovery_polling) { in hpsa_rescan_ctlr_worker()
8282 hpsa_disable_rld_caching(h); in hpsa_rescan_ctlr_worker()
8283 if (hpsa_luns_changed(h)) { in hpsa_rescan_ctlr_worker()
8286 dev_info(&h->pdev->dev, in hpsa_rescan_ctlr_worker()
8288 sh = scsi_host_get(h->scsi_host); in hpsa_rescan_ctlr_worker()
8295 spin_lock_irqsave(&h->lock, flags); in hpsa_rescan_ctlr_worker()
8296 if (!h->remove_in_progress) in hpsa_rescan_ctlr_worker()
8297 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, in hpsa_rescan_ctlr_worker()
8298 h->heartbeat_sample_interval); in hpsa_rescan_ctlr_worker()
8299 spin_unlock_irqrestore(&h->lock, flags); in hpsa_rescan_ctlr_worker()
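
hpsa_rescan_ctlr_worker is a self-rescheduling delayed work item: each pass re-queues itself on rescan_ctlr_wq after heartbeat_sample_interval unless remove_in_progress is set, so teardown only needs to raise the flag and cancel_delayed_work_sync. A userspace analogue built on a timed condition-variable wait with a stop flag (compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int remove_in_progress;

static void *rescan_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!remove_in_progress) {
		struct timespec ts;
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 1;			/* heartbeat_sample_interval */
		pthread_cond_timedwait(&cond, &lock, &ts);
		if (!remove_in_progress)
			puts("periodic rescan tick");
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, rescan_worker, NULL);
	struct timespec nap = { .tv_sec = 3 };
	nanosleep(&nap, NULL);
	pthread_mutex_lock(&lock);
	remove_in_progress = 1;			/* analogue of the removal flag */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
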
8305 struct ctlr_info *h = container_of(to_delayed_work(work), in hpsa_monitor_ctlr_worker() local
8308 detect_controller_lockup(h); in hpsa_monitor_ctlr_worker()
8309 if (lockup_detected(h)) in hpsa_monitor_ctlr_worker()
8312 spin_lock_irqsave(&h->lock, flags); in hpsa_monitor_ctlr_worker()
8313 if (!h->remove_in_progress) in hpsa_monitor_ctlr_worker()
8314 schedule_delayed_work(&h->monitor_ctlr_work, in hpsa_monitor_ctlr_worker()
8315 h->heartbeat_sample_interval); in hpsa_monitor_ctlr_worker()
8316 spin_unlock_irqrestore(&h->lock, flags); in hpsa_monitor_ctlr_worker()
8319 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, in hpsa_create_controller_wq() argument
8324 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); in hpsa_create_controller_wq()
8326 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name); in hpsa_create_controller_wq()
8334 struct ctlr_info *h; in hpsa_init_one() local
8368 h = kzalloc(sizeof(*h), GFP_KERNEL); in hpsa_init_one()
8369 if (!h) { in hpsa_init_one()
8374 h->pdev = pdev; in hpsa_init_one()
8376 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; in hpsa_init_one()
8377 INIT_LIST_HEAD(&h->offline_device_list); in hpsa_init_one()
8378 spin_lock_init(&h->lock); in hpsa_init_one()
8379 spin_lock_init(&h->offline_device_lock); in hpsa_init_one()
8380 spin_lock_init(&h->scan_lock); in hpsa_init_one()
8381 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); in hpsa_init_one()
8382 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS); in hpsa_init_one()
8385 h->lockup_detected = alloc_percpu(u32); in hpsa_init_one()
8386 if (!h->lockup_detected) { in hpsa_init_one()
8387 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n"); in hpsa_init_one()
8391 set_lockup_detected_for_all_cpus(h, 0); in hpsa_init_one()
8393 rc = hpsa_pci_init(h); in hpsa_init_one()
8399 rc = hpsa_scsi_host_alloc(h); in hpsa_init_one()
8403 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no); in hpsa_init_one()
8404 h->ctlr = number_of_controllers; in hpsa_init_one()
8422 h->access.set_intr_mask(h, HPSA_INTR_OFF); in hpsa_init_one()
8424 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx); in hpsa_init_one()
8427 rc = hpsa_alloc_cmd_pool(h); in hpsa_init_one()
8430 rc = hpsa_alloc_sg_chain_blocks(h); in hpsa_init_one()
8433 init_waitqueue_head(&h->scan_wait_queue); in hpsa_init_one()
8434 init_waitqueue_head(&h->abort_cmd_wait_queue); in hpsa_init_one()
8435 init_waitqueue_head(&h->event_sync_wait_queue); in hpsa_init_one()
8436 mutex_init(&h->reset_mutex); in hpsa_init_one()
8437 h->scan_finished = 1; /* no scan currently in progress */ in hpsa_init_one()
8439 pci_set_drvdata(pdev, h); in hpsa_init_one()
8440 h->ndevices = 0; in hpsa_init_one()
8442 spin_lock_init(&h->devlock); in hpsa_init_one()
8443 rc = hpsa_put_ctlr_into_performant_mode(h); in hpsa_init_one()
8448 rc = hpsa_scsi_add_host(h); in hpsa_init_one()
8453 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); in hpsa_init_one()
8454 if (!h->rescan_ctlr_wq) { in hpsa_init_one()
8459 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); in hpsa_init_one()
8460 if (!h->resubmit_wq) { in hpsa_init_one()
8479 spin_lock_irqsave(&h->lock, flags); in hpsa_init_one()
8480 h->access.set_intr_mask(h, HPSA_INTR_OFF); in hpsa_init_one()
8481 spin_unlock_irqrestore(&h->lock, flags); in hpsa_init_one()
8482 hpsa_free_irqs(h); in hpsa_init_one()
8483 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, in hpsa_init_one()
8486 dev_warn(&h->pdev->dev, in hpsa_init_one()
8492 hpsa_free_performant_mode(h); /* clean7 */ in hpsa_init_one()
8493 hpsa_free_sg_chain_blocks(h); /* clean6 */ in hpsa_init_one()
8494 hpsa_free_cmd_pool(h); /* clean5 */ in hpsa_init_one()
8502 rc = hpsa_kdump_soft_reset(h); in hpsa_init_one()
8507 dev_info(&h->pdev->dev, "Board READY.\n"); in hpsa_init_one()
8508 dev_info(&h->pdev->dev, in hpsa_init_one()
8510 h->access.set_intr_mask(h, HPSA_INTR_ON); in hpsa_init_one()
8512 h->access.set_intr_mask(h, HPSA_INTR_OFF); in hpsa_init_one()
8514 rc = controller_reset_failed(h->cfgtable); in hpsa_init_one()
8516 dev_info(&h->pdev->dev, in hpsa_init_one()
8523 hpsa_undo_allocations_after_kdump_soft_reset(h); in hpsa_init_one()
8533 h->acciopath_status = 1; in hpsa_init_one()
8535 h->discovery_polling = 0; in hpsa_init_one()
8539 h->access.set_intr_mask(h, HPSA_INTR_ON); in hpsa_init_one()
8541 hpsa_hba_inquiry(h); in hpsa_init_one()
8543 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL); in hpsa_init_one()
8544 if (!h->lastlogicals) in hpsa_init_one()
8545 dev_info(&h->pdev->dev, in hpsa_init_one()
8549 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; in hpsa_init_one()
8550 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); in hpsa_init_one()
8551 schedule_delayed_work(&h->monitor_ctlr_work, in hpsa_init_one()
8552 h->heartbeat_sample_interval); in hpsa_init_one()
8553 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); in hpsa_init_one()
8554 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, in hpsa_init_one()
8555 h->heartbeat_sample_interval); in hpsa_init_one()
8559 hpsa_free_performant_mode(h); in hpsa_init_one()
8560 h->access.set_intr_mask(h, HPSA_INTR_OFF); in hpsa_init_one()
8562 hpsa_free_sg_chain_blocks(h); in hpsa_init_one()
8564 hpsa_free_cmd_pool(h); in hpsa_init_one()
8566 hpsa_free_irqs(h); in hpsa_init_one()
8568 scsi_host_put(h->scsi_host); in hpsa_init_one()
8569 h->scsi_host = NULL; in hpsa_init_one()
8571 hpsa_free_pci_init(h); in hpsa_init_one()
8573 if (h->lockup_detected) { in hpsa_init_one()
8574 free_percpu(h->lockup_detected); in hpsa_init_one()
8575 h->lockup_detected = NULL; in hpsa_init_one()
8578 if (h->resubmit_wq) { in hpsa_init_one()
8579 destroy_workqueue(h->resubmit_wq); in hpsa_init_one()
8580 h->resubmit_wq = NULL; in hpsa_init_one()
8582 if (h->rescan_ctlr_wq) { in hpsa_init_one()
8583 destroy_workqueue(h->rescan_ctlr_wq); in hpsa_init_one()
8584 h->rescan_ctlr_wq = NULL; in hpsa_init_one()
8586 kfree(h); in hpsa_init_one()
8590 static void hpsa_flush_cache(struct ctlr_info *h) in hpsa_flush_cache() argument
8596 if (unlikely(lockup_detected(h))) in hpsa_flush_cache()
8602 c = cmd_alloc(h); in hpsa_flush_cache()
8604 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, in hpsa_flush_cache()
8608 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, in hpsa_flush_cache()
8614 dev_warn(&h->pdev->dev, in hpsa_flush_cache()
8616 cmd_free(h, c); in hpsa_flush_cache()
8623 static void hpsa_disable_rld_caching(struct ctlr_info *h) in hpsa_disable_rld_caching() argument
8630 if (unlikely(lockup_detected(h))) in hpsa_disable_rld_caching()
8635 dev_err(&h->pdev->dev, in hpsa_disable_rld_caching()
8640 c = cmd_alloc(h); in hpsa_disable_rld_caching()
8643 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, in hpsa_disable_rld_caching()
8647 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, in hpsa_disable_rld_caching()
8655 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0, in hpsa_disable_rld_caching()
8659 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, in hpsa_disable_rld_caching()
8665 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, in hpsa_disable_rld_caching()
8669 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, in hpsa_disable_rld_caching()
8678 dev_err(&h->pdev->dev, in hpsa_disable_rld_caching()
8681 cmd_free(h, c); in hpsa_disable_rld_caching()
8687 struct ctlr_info *h; in hpsa_shutdown() local
8689 h = pci_get_drvdata(pdev); in hpsa_shutdown()
8694 hpsa_flush_cache(h); in hpsa_shutdown()
8695 h->access.set_intr_mask(h, HPSA_INTR_OFF); in hpsa_shutdown()
8696 hpsa_free_irqs(h); /* init_one 4 */ in hpsa_shutdown()
8697 hpsa_disable_interrupt_mode(h); /* pci_init 2 */ in hpsa_shutdown()
8700 static void hpsa_free_device_info(struct ctlr_info *h) in hpsa_free_device_info() argument
8704 for (i = 0; i < h->ndevices; i++) { in hpsa_free_device_info()
8705 kfree(h->dev[i]); in hpsa_free_device_info()
8706 h->dev[i] = NULL; in hpsa_free_device_info()
8712 struct ctlr_info *h; in hpsa_remove_one() local
8719 h = pci_get_drvdata(pdev); in hpsa_remove_one()
8722 spin_lock_irqsave(&h->lock, flags); in hpsa_remove_one()
8723 h->remove_in_progress = 1; in hpsa_remove_one()
8724 spin_unlock_irqrestore(&h->lock, flags); in hpsa_remove_one()
8725 cancel_delayed_work_sync(&h->monitor_ctlr_work); in hpsa_remove_one()
8726 cancel_delayed_work_sync(&h->rescan_ctlr_work); in hpsa_remove_one()
8727 destroy_workqueue(h->rescan_ctlr_wq); in hpsa_remove_one()
8728 destroy_workqueue(h->resubmit_wq); in hpsa_remove_one()
8736 if (h->scsi_host) in hpsa_remove_one()
8737 scsi_remove_host(h->scsi_host); /* init_one 8 */ in hpsa_remove_one()
8742 hpsa_free_device_info(h); /* scan */ in hpsa_remove_one()
8744 kfree(h->hba_inquiry_data); /* init_one 10 */ in hpsa_remove_one()
8745 h->hba_inquiry_data = NULL; /* init_one 10 */ in hpsa_remove_one()
8746 hpsa_free_ioaccel2_sg_chain_blocks(h); in hpsa_remove_one()
8747 hpsa_free_performant_mode(h); /* init_one 7 */ in hpsa_remove_one()
8748 hpsa_free_sg_chain_blocks(h); /* init_one 6 */ in hpsa_remove_one()
8749 hpsa_free_cmd_pool(h); /* init_one 5 */ in hpsa_remove_one()
8750 kfree(h->lastlogicals); in hpsa_remove_one()
8754 scsi_host_put(h->scsi_host); /* init_one 3 */ in hpsa_remove_one()
8755 h->scsi_host = NULL; /* init_one 3 */ in hpsa_remove_one()
8758 hpsa_free_pci_init(h); /* init_one 2.5 */ in hpsa_remove_one()
8760 free_percpu(h->lockup_detected); /* init_one 2 */ in hpsa_remove_one()
8761 h->lockup_detected = NULL; /* init_one 2 */ in hpsa_remove_one()
8764 hpsa_delete_sas_host(h); in hpsa_remove_one()
8766 kfree(h); /* init_one 1 */ in hpsa_remove_one()
8828 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) in hpsa_enter_performant_mode() argument
8882 for (i = 0; i < h->nreply_queues; i++) in hpsa_enter_performant_mode()
8883 memset(h->reply_queue[i].head, 0, h->reply_queue_size); in hpsa_enter_performant_mode()
8887 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); in hpsa_enter_performant_mode()
8889 writel(bft[i], &h->transtable->BlockFetch[i]); in hpsa_enter_performant_mode()
8892 writel(h->max_commands, &h->transtable->RepQSize); in hpsa_enter_performant_mode()
8893 writel(h->nreply_queues, &h->transtable->RepQCount); in hpsa_enter_performant_mode()
8894 writel(0, &h->transtable->RepQCtrAddrLow32); in hpsa_enter_performant_mode()
8895 writel(0, &h->transtable->RepQCtrAddrHigh32); in hpsa_enter_performant_mode()
8897 for (i = 0; i < h->nreply_queues; i++) { in hpsa_enter_performant_mode()
8898 writel(0, &h->transtable->RepQAddr[i].upper); in hpsa_enter_performant_mode()
8899 writel(h->reply_queue[i].busaddr, in hpsa_enter_performant_mode()
8900 &h->transtable->RepQAddr[i].lower); in hpsa_enter_performant_mode()
8903 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); in hpsa_enter_performant_mode()
8904 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); in hpsa_enter_performant_mode()
8910 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); in hpsa_enter_performant_mode()
8911 writel(4, &h->cfgtable->HostWrite.CoalIntCount); in hpsa_enter_performant_mode()
8915 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); in hpsa_enter_performant_mode()
8916 writel(4, &h->cfgtable->HostWrite.CoalIntCount); in hpsa_enter_performant_mode()
8919 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); in hpsa_enter_performant_mode()
8920 if (hpsa_wait_for_mode_change_ack(h)) { in hpsa_enter_performant_mode()
8921 dev_err(&h->pdev->dev, in hpsa_enter_performant_mode()
8925 register_value = readl(&(h->cfgtable->TransportActive)); in hpsa_enter_performant_mode()
8927 dev_err(&h->pdev->dev, in hpsa_enter_performant_mode()
8932 h->access = access; in hpsa_enter_performant_mode()
8933 h->transMethod = transMethod; in hpsa_enter_performant_mode()
8941 for (i = 0; i < h->nreply_queues; i++) { in hpsa_enter_performant_mode()
8942 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); in hpsa_enter_performant_mode()
8943 h->reply_queue[i].current_entry = in hpsa_enter_performant_mode()
8944 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); in hpsa_enter_performant_mode()
8946 bft[7] = h->ioaccel_maxsg + 8; in hpsa_enter_performant_mode()
8947 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, in hpsa_enter_performant_mode()
8948 h->ioaccel1_blockFetchTable); in hpsa_enter_performant_mode()
8951 for (i = 0; i < h->nreply_queues; i++) in hpsa_enter_performant_mode()
8952 memset(h->reply_queue[i].head, in hpsa_enter_performant_mode()
8954 h->reply_queue_size); in hpsa_enter_performant_mode()
8959 for (i = 0; i < h->nr_cmds; i++) { in hpsa_enter_performant_mode()
8960 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; in hpsa_enter_performant_mode()
8963 cp->err_info = (u32) (h->errinfo_pool_dhandle + in hpsa_enter_performant_mode()
8974 cpu_to_le64(h->ioaccel_cmd_pool_dhandle + in hpsa_enter_performant_mode()
8982 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, in hpsa_enter_performant_mode()
8985 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; in hpsa_enter_performant_mode()
8986 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, in hpsa_enter_performant_mode()
8987 4, h->ioaccel2_blockFetchTable); in hpsa_enter_performant_mode()
8988 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); in hpsa_enter_performant_mode()
8991 h->ioaccel2_bft2_regs = in hpsa_enter_performant_mode()
8992 remap_pci_mem(pci_resource_start(h->pdev, in hpsa_enter_performant_mode()
8996 sizeof(*h->ioaccel2_bft2_regs)); in hpsa_enter_performant_mode()
8998 writel(bft2[i], &h->ioaccel2_bft2_regs[i]); in hpsa_enter_performant_mode()
9000 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); in hpsa_enter_performant_mode()
9001 if (hpsa_wait_for_mode_change_ack(h)) { in hpsa_enter_performant_mode()
9002 dev_err(&h->pdev->dev, in hpsa_enter_performant_mode()
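
The CoalIntDelay/CoalIntCount writes above configure interrupt coalescing (delay 10, count 4) for whichever ioaccel variant is enabled, and both mode switches in this function end with the same handshake: ring SA5_DOORBELL with CFGTBL_ChangeReq, wait for the controller's acknowledgment, and (for the performant-mode switch) read back TransportActive to confirm the method took. A condensed sketch of that shared sequence, using the register names from this file (error strings are illustrative, since the listing truncates the originals):

        static int change_transport_sketch(struct ctlr_info *h, u32 method)
        {
                u32 active;

                writel(method, &h->cfgtable->HostWrite.TransportRequest);
                writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
                if (hpsa_wait_for_mode_change_ack(h)) {
                        dev_err(&h->pdev->dev, "mode change doorbell timed out\n");
                        return -ENODEV;
                }
                active = readl(&h->cfgtable->TransportActive);
                if (!(active & method)) {
                        dev_err(&h->pdev->dev, "transport method never activated\n");
                        return -ENODEV;
                }
                return 0;
        }

The second doorbell ring in this function (after programming the ioaccel2 block fetch table registers) only waits for the ack; it does not re-check TransportActive.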
9010 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h) in hpsa_free_ioaccel1_cmd_and_bft() argument
9012 if (h->ioaccel_cmd_pool) { in hpsa_free_ioaccel1_cmd_and_bft()
9013 pci_free_consistent(h->pdev, in hpsa_free_ioaccel1_cmd_and_bft()
9014 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), in hpsa_free_ioaccel1_cmd_and_bft()
9015 h->ioaccel_cmd_pool, in hpsa_free_ioaccel1_cmd_and_bft()
9016 h->ioaccel_cmd_pool_dhandle); in hpsa_free_ioaccel1_cmd_and_bft()
9017 h->ioaccel_cmd_pool = NULL; in hpsa_free_ioaccel1_cmd_and_bft()
9018 h->ioaccel_cmd_pool_dhandle = 0; in hpsa_free_ioaccel1_cmd_and_bft()
9020 kfree(h->ioaccel1_blockFetchTable); in hpsa_free_ioaccel1_cmd_and_bft()
9021 h->ioaccel1_blockFetchTable = NULL; in hpsa_free_ioaccel1_cmd_and_bft()
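
hpsa_free_ioaccel1_cmd_and_bft() shows the driver's free-and-poison idiom: guard on the pointer, release the buffer, then zero both the virtual and DMA handles so a second call (e.g. from an error path that frees everything) is a harmless no-op. The same shape factored into a generic helper (hypothetical, not in the driver; assumes <linux/pci.h>):

        /* Hypothetical helper: free a coherent buffer and poison both
         * handles so repeated calls are safe no-ops.
         */
        static void free_coherent_once(struct pci_dev *pdev, size_t size,
                                       void **vaddr, dma_addr_t *dhandle)
        {
                if (!*vaddr)
                        return;
                pci_free_consistent(pdev, size, *vaddr, *dhandle);
                *vaddr = NULL;
                *dhandle = 0;
        }

The kfree of the block fetch table follows the same rule in miniature: kfree(NULL) is already a no-op, so only the pointer needs re-zeroing.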
9025 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h) in hpsa_alloc_ioaccel1_cmd_and_bft() argument
9027 h->ioaccel_maxsg = in hpsa_alloc_ioaccel1_cmd_and_bft()
9028 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); in hpsa_alloc_ioaccel1_cmd_and_bft()
9029 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) in hpsa_alloc_ioaccel1_cmd_and_bft()
9030 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; in hpsa_alloc_ioaccel1_cmd_and_bft()
9038 h->ioaccel_cmd_pool = in hpsa_alloc_ioaccel1_cmd_and_bft()
9039 pci_alloc_consistent(h->pdev, in hpsa_alloc_ioaccel1_cmd_and_bft()
9040 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), in hpsa_alloc_ioaccel1_cmd_and_bft()
9041 &(h->ioaccel_cmd_pool_dhandle)); in hpsa_alloc_ioaccel1_cmd_and_bft()
9043 h->ioaccel1_blockFetchTable = in hpsa_alloc_ioaccel1_cmd_and_bft()
9044 kmalloc(((h->ioaccel_maxsg + 1) * in hpsa_alloc_ioaccel1_cmd_and_bft()
9047 if ((h->ioaccel_cmd_pool == NULL) || in hpsa_alloc_ioaccel1_cmd_and_bft()
9048 (h->ioaccel1_blockFetchTable == NULL)) in hpsa_alloc_ioaccel1_cmd_and_bft()
9051 memset(h->ioaccel_cmd_pool, 0, in hpsa_alloc_ioaccel1_cmd_and_bft()
9052 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); in hpsa_alloc_ioaccel1_cmd_and_bft()
9056 hpsa_free_ioaccel1_cmd_and_bft(h); in hpsa_alloc_ioaccel1_cmd_and_bft()
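
The ioaccel1 allocator reads the controller's supported embedded SG count, clamps it to IOACCEL1_MAXSGENTRIES, then attempts both allocations before checking either; the single combined NULL test works only because the matching free routine tolerates partially-allocated state. A sketch of that shape (assumes hpsa.h for the types; the fetch-table element size of u32 matches the other fetch tables in this file):

        static int alloc_ioaccel1_sketch(struct ctlr_info *h)
        {
                h->ioaccel_maxsg =
                        readl(&h->cfgtable->io_accel_max_embedded_sg_count);
                if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
                        h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

                h->ioaccel_cmd_pool =
                        pci_alloc_consistent(h->pdev,
                                h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
                                &h->ioaccel_cmd_pool_dhandle);
                h->ioaccel1_blockFetchTable =
                        kmalloc((h->ioaccel_maxsg + 1) * sizeof(u32),
                                GFP_KERNEL);
                if (!h->ioaccel_cmd_pool || !h->ioaccel1_blockFetchTable)
                        goto clean_up;

                memset(h->ioaccel_cmd_pool, 0,
                       h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
                return 0;

        clean_up:
                hpsa_free_ioaccel1_cmd_and_bft(h);      /* NULL-checks both */
                return -ENOMEM;
        }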
9061 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h) in hpsa_free_ioaccel2_cmd_and_bft() argument
9063 hpsa_free_ioaccel2_sg_chain_blocks(h); in hpsa_free_ioaccel2_cmd_and_bft()
9065 if (h->ioaccel2_cmd_pool) { in hpsa_free_ioaccel2_cmd_and_bft()
9066 pci_free_consistent(h->pdev, in hpsa_free_ioaccel2_cmd_and_bft()
9067 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), in hpsa_free_ioaccel2_cmd_and_bft()
9068 h->ioaccel2_cmd_pool, in hpsa_free_ioaccel2_cmd_and_bft()
9069 h->ioaccel2_cmd_pool_dhandle); in hpsa_free_ioaccel2_cmd_and_bft()
9070 h->ioaccel2_cmd_pool = NULL; in hpsa_free_ioaccel2_cmd_and_bft()
9071 h->ioaccel2_cmd_pool_dhandle = 0; in hpsa_free_ioaccel2_cmd_and_bft()
9073 kfree(h->ioaccel2_blockFetchTable); in hpsa_free_ioaccel2_cmd_and_bft()
9074 h->ioaccel2_blockFetchTable = NULL; in hpsa_free_ioaccel2_cmd_and_bft()
9078 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h) in hpsa_alloc_ioaccel2_cmd_and_bft() argument
9084 h->ioaccel_maxsg = in hpsa_alloc_ioaccel2_cmd_and_bft()
9085 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); in hpsa_alloc_ioaccel2_cmd_and_bft()
9086 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) in hpsa_alloc_ioaccel2_cmd_and_bft()
9087 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; in hpsa_alloc_ioaccel2_cmd_and_bft()
9091 h->ioaccel2_cmd_pool = in hpsa_alloc_ioaccel2_cmd_and_bft()
9092 pci_alloc_consistent(h->pdev, in hpsa_alloc_ioaccel2_cmd_and_bft()
9093 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), in hpsa_alloc_ioaccel2_cmd_and_bft()
9094 &(h->ioaccel2_cmd_pool_dhandle)); in hpsa_alloc_ioaccel2_cmd_and_bft()
9096 h->ioaccel2_blockFetchTable = in hpsa_alloc_ioaccel2_cmd_and_bft()
9097 kmalloc(((h->ioaccel_maxsg + 1) * in hpsa_alloc_ioaccel2_cmd_and_bft()
9100 if ((h->ioaccel2_cmd_pool == NULL) || in hpsa_alloc_ioaccel2_cmd_and_bft()
9101 (h->ioaccel2_blockFetchTable == NULL)) { in hpsa_alloc_ioaccel2_cmd_and_bft()
9106 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h); in hpsa_alloc_ioaccel2_cmd_and_bft()
9110 memset(h->ioaccel2_cmd_pool, 0, in hpsa_alloc_ioaccel2_cmd_and_bft()
9111 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); in hpsa_alloc_ioaccel2_cmd_and_bft()
9115 hpsa_free_ioaccel2_cmd_and_bft(h); in hpsa_alloc_ioaccel2_cmd_and_bft()
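
The ioaccel2 variant adds one ordering dependency: the SG chain blocks reference the ioaccel2 command pool, so they are allocated after it and freed before it (hpsa_free_ioaccel2_cmd_and_bft releases the chain blocks first). A sketch of that staged allocation, again funneling all failures through the idempotent free routine (assumes hpsa.h):

        static int alloc_ioaccel2_sketch(struct ctlr_info *h)
        {
                int rc = -ENOMEM;

                h->ioaccel2_cmd_pool =
                        pci_alloc_consistent(h->pdev,
                                h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
                                &h->ioaccel2_cmd_pool_dhandle);
                h->ioaccel2_blockFetchTable =
                        kmalloc((h->ioaccel_maxsg + 1) * sizeof(u32),
                                GFP_KERNEL);
                if (!h->ioaccel2_cmd_pool || !h->ioaccel2_blockFetchTable)
                        goto clean_up;

                rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
                if (rc)
                        goto clean_up;

                memset(h->ioaccel2_cmd_pool, 0,
                       h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
                return 0;

        clean_up:
                hpsa_free_ioaccel2_cmd_and_bft(h);      /* handles partial state */
                return rc;
        }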
9120 static void hpsa_free_performant_mode(struct ctlr_info *h) in hpsa_free_performant_mode() argument
9122 kfree(h->blockFetchTable); in hpsa_free_performant_mode()
9123 h->blockFetchTable = NULL; in hpsa_free_performant_mode()
9124 hpsa_free_reply_queues(h); in hpsa_free_performant_mode()
9125 hpsa_free_ioaccel1_cmd_and_bft(h); in hpsa_free_performant_mode()
9126 hpsa_free_ioaccel2_cmd_and_bft(h); in hpsa_free_performant_mode()
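
hpsa_free_performant_mode() is the aggregate inverse of hpsa_put_ctlr_into_performant_mode(); the clean-up ladder at the end of that function (below) releases the same four resources in the same order. Since every helper involved NULL-checks and re-zeroes its pointers, a hedged refactor could funnel both paths through one unwind point (sketch only, with a hypothetical combined allocator; not the driver's code):

        /* Hypothetical refactor sketch. */
        static int put_ctlr_into_performant_mode_sketch(struct ctlr_info *h)
        {
                int rc;

                rc = hpsa_alloc_everything(h);  /* hypothetical combined alloc */
                if (rc) {
                        hpsa_free_performant_mode(h);   /* single unwind point */
                        return rc;
                }
                return 0;
        }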
9132 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) in hpsa_put_ctlr_into_performant_mode() argument
9142 trans_support = readl(&(h->cfgtable->TransportSupport)); in hpsa_put_ctlr_into_performant_mode()
9150 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h); in hpsa_put_ctlr_into_performant_mode()
9156 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h); in hpsa_put_ctlr_into_performant_mode()
9161 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1; in hpsa_put_ctlr_into_performant_mode()
9162 hpsa_get_max_perf_mode_cmds(h); in hpsa_put_ctlr_into_performant_mode()
9164 h->reply_queue_size = h->max_commands * sizeof(u64); in hpsa_put_ctlr_into_performant_mode()
9166 for (i = 0; i < h->nreply_queues; i++) { in hpsa_put_ctlr_into_performant_mode()
9167 h->reply_queue[i].head = pci_alloc_consistent(h->pdev, in hpsa_put_ctlr_into_performant_mode()
9168 h->reply_queue_size, in hpsa_put_ctlr_into_performant_mode()
9169 &(h->reply_queue[i].busaddr)); in hpsa_put_ctlr_into_performant_mode()
9170 if (!h->reply_queue[i].head) { in hpsa_put_ctlr_into_performant_mode()
9174 h->reply_queue[i].size = h->max_commands; in hpsa_put_ctlr_into_performant_mode()
9175 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ in hpsa_put_ctlr_into_performant_mode()
9176 h->reply_queue[i].current_entry = 0; in hpsa_put_ctlr_into_performant_mode()
9180 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * in hpsa_put_ctlr_into_performant_mode()
9182 if (!h->blockFetchTable) { in hpsa_put_ctlr_into_performant_mode()
9187 rc = hpsa_enter_performant_mode(h, trans_support); in hpsa_put_ctlr_into_performant_mode()
9193 kfree(h->blockFetchTable); in hpsa_put_ctlr_into_performant_mode()
9194 h->blockFetchTable = NULL; in hpsa_put_ctlr_into_performant_mode()
9196 hpsa_free_reply_queues(h); in hpsa_put_ctlr_into_performant_mode()
9197 hpsa_free_ioaccel1_cmd_and_bft(h); in hpsa_put_ctlr_into_performant_mode()
9198 hpsa_free_ioaccel2_cmd_and_bft(h); in hpsa_put_ctlr_into_performant_mode()
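
The reply-queue setup above allocates one queue per MSI-X vector (minimum one), each sized for h->max_commands entries of u64 and placed in DMA-coherent memory, with wraparound initialized to 1 per the controller spec. A sketch of that loop, leaning on hpsa_free_reply_queues() to unwind a partial allocation (assumes hpsa.h):

        static int alloc_reply_queues_sketch(struct ctlr_info *h)
        {
                int i;

                h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
                h->reply_queue_size = h->max_commands * sizeof(u64);

                for (i = 0; i < h->nreply_queues; i++) {
                        h->reply_queue[i].head =
                                pci_alloc_consistent(h->pdev,
                                        h->reply_queue_size,
                                        &h->reply_queue[i].busaddr);
                        if (!h->reply_queue[i].head) {
                                hpsa_free_reply_queues(h); /* frees earlier ones */
                                return -ENOMEM;
                        }
                        h->reply_queue[i].size = h->max_commands;
                        h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
                        h->reply_queue[i].current_entry = 0;
                }
                return 0;
        }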
9207 static void hpsa_drain_accel_commands(struct ctlr_info *h) in hpsa_drain_accel_commands() argument
9215 for (i = 0; i < h->nr_cmds; i++) { in hpsa_drain_accel_commands()
9216 c = h->cmd_pool + i; in hpsa_drain_accel_commands()
9220 cmd_free(h, c); in hpsa_drain_accel_commands()
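
hpsa_drain_accel_commands() walks the entire command pool rather than a list of outstanding requests: bumping each slot's refcount reveals whether it is in use, and the cmd_free() balances the increment just taken. A sketch of the drain loop, assuming the refcount convention used elsewhere in this driver (refcount > 1 means in flight) and an is_accelerated_cmd()-style predicate (assumes <linux/delay.h> and <linux/atomic.h>):

        static void drain_accel_sketch(struct ctlr_info *h)
        {
                struct CommandList *c;
                int i, accel_cmds_out;

                do {    /* spin until no accelerated commands remain in flight */
                        accel_cmds_out = 0;
                        for (i = 0; i < h->nr_cmds; i++) {
                                c = h->cmd_pool + i;
                                if (atomic_inc_return(&c->refcount) > 1)
                                        accel_cmds_out += is_accelerated_cmd(c);
                                cmd_free(h, c); /* drops the reference taken above */
                        }
                        if (accel_cmds_out <= 0)
                                break;
                        msleep(100);
                } while (1);
        }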
9392 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h, in hpsa_find_device_by_sas_rphy() argument
9398 for (i = 0; i < h->ndevices; i++) { in hpsa_find_device_by_sas_rphy()
9399 device = h->dev[i]; in hpsa_find_device_by_sas_rphy()
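
Lookup by SAS rphy is a plain linear scan of h->dev[], which is adequate at controller device counts. A sketch, assuming each hpsa_scsi_dev_t caches its hpsa_sas_port and that the port records the rphy it exposed (assumes hpsa.h and <scsi/scsi_transport_sas.h>):

        static struct hpsa_scsi_dev_t *
        find_device_by_rphy_sketch(struct ctlr_info *h, struct sas_rphy *rphy)
        {
                int i;

                for (i = 0; i < h->ndevices; i++) {
                        struct hpsa_scsi_dev_t *device = h->dev[i];

                        if (!device->sas_port)
                                continue;       /* no SAS presence for this LUN */
                        if (device->sas_port->rphy == rphy)
                                return device;
                }
                return NULL;
        }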
9409 static int hpsa_add_sas_host(struct ctlr_info *h) in hpsa_add_sas_host() argument
9417 parent_dev = &h->scsi_host->shost_gendev; in hpsa_add_sas_host()
9423 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address); in hpsa_add_sas_host()
9439 h->sas_host = hpsa_sas_node; in hpsa_add_sas_host()
9453 static void hpsa_delete_sas_host(struct ctlr_info *h) in hpsa_delete_sas_host() argument
9455 hpsa_free_sas_node(h->sas_host); in hpsa_delete_sas_host()
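
hpsa_add_sas_host() builds the SAS topology top-down: a node rooted at the Scsi_Host's shost_gendev, then a port carrying h->sas_address, then a phy. On success the node is stored in h->sas_host; on failure the unwind frees the node, which owns everything beneath it, and hpsa_delete_sas_host() is just that same node free. A condensed sketch using this driver's own static helpers (hpsa_alloc_sas_port and hpsa_free_sas_node appear in the listing above; hpsa_alloc_sas_node and hpsa_alloc_sas_phy are assumed from elsewhere in the file):

        static int add_sas_host_sketch(struct ctlr_info *h)
        {
                struct device *parent_dev = &h->scsi_host->shost_gendev;
                struct hpsa_sas_node *node;
                struct hpsa_sas_port *port;
                struct hpsa_sas_phy *phy;

                node = hpsa_alloc_sas_node(parent_dev);
                if (!node)
                        return -ENOMEM;

                port = hpsa_alloc_sas_port(node, h->sas_address);
                if (!port)
                        goto free_node;

                phy = hpsa_alloc_sas_phy(port);
                if (!phy)
                        goto free_node; /* node teardown frees the port too */

                h->sas_host = node;     /* released by hpsa_delete_sas_host() */
                return 0;

        free_node:
                hpsa_free_sas_node(node);
                return -ENOMEM;
        }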