Lines Matching refs:dd
124 static int mtip_block_initialize(struct driver_data *dd);
152 struct driver_data *dd = pci_get_drvdata(pdev); in mtip_check_surprise_removal() local
154 if (dd->sr) in mtip_check_surprise_removal()
160 dd->sr = true; in mtip_check_surprise_removal()
161 if (dd->queue) in mtip_check_surprise_removal()
162 set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags); in mtip_check_surprise_removal()
164 dev_warn(&dd->pdev->dev, in mtip_check_surprise_removal()
172 static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd) in mtip_get_int_command() argument
176 if (mtip_check_surprise_removal(dd->pdev)) in mtip_get_int_command()
179 rq = blk_mq_alloc_request(dd->queue, 0, __GFP_RECLAIM, true); in mtip_get_int_command()
186 static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd) in mtip_put_int_command() argument
194 static struct request *mtip_rq_from_tag(struct driver_data *dd, in mtip_rq_from_tag() argument
197 struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0]; in mtip_rq_from_tag()
202 static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd, in mtip_cmd_from_tag() argument
205 struct request *rq = mtip_rq_from_tag(dd, tag); in mtip_cmd_from_tag()
231 struct driver_data *dd = port->dd; in mtip_async_complete() local
234 if (unlikely(!dd) || unlikely(!port)) in mtip_async_complete()
238 dev_warn(&port->dd->pdev->dev, in mtip_async_complete()
242 rq = mtip_rq_from_tag(dd, tag); in mtip_async_complete()
256 static int mtip_hba_reset(struct driver_data *dd) in mtip_hba_reset() argument
261 writel(HOST_RESET, dd->mmio + HOST_CTL); in mtip_hba_reset()
264 readl(dd->mmio + HOST_CTL); in mtip_hba_reset()
273 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) in mtip_hba_reset()
276 } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) in mtip_hba_reset()
279 if (readl(dd->mmio + HOST_CTL) & HOST_RESET) in mtip_hba_reset()
415 if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) { in mtip_init_port()
430 for (i = 0; i < port->dd->slot_groups; i++) in mtip_init_port()
437 writel(readl(port->dd->mmio + HOST_IRQ_STAT), in mtip_init_port()
438 port->dd->mmio + HOST_IRQ_STAT); in mtip_init_port()
465 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag)) in mtip_restart_port()
473 dev_warn(&port->dd->pdev->dev, in mtip_restart_port()
476 if (mtip_hba_reset(port->dd)) in mtip_restart_port()
477 dev_err(&port->dd->pdev->dev, in mtip_restart_port()
484 dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n"); in mtip_restart_port()
496 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag)) in mtip_restart_port()
510 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag)) in mtip_restart_port()
514 dev_warn(&port->dd->pdev->dev, in mtip_restart_port()
522 static int mtip_device_reset(struct driver_data *dd) in mtip_device_reset() argument
526 if (mtip_check_surprise_removal(dd->pdev)) in mtip_device_reset()
529 if (mtip_hba_reset(dd) < 0) in mtip_device_reset()
533 mtip_init_port(dd->port); in mtip_device_reset()
534 mtip_start_port(dd->port); in mtip_device_reset()
537 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, in mtip_device_reset()
538 dd->mmio + HOST_CTL); in mtip_device_reset()
545 static void print_tags(struct driver_data *dd, in print_tags() argument
557 dev_warn(&dd->pdev->dev, in print_tags()
581 dev_warn(&port->dd->pdev->dev, in mtip_completion()
606 static void mtip_handle_tfe(struct driver_data *dd) in mtip_handle_tfe() argument
619 dev_warn(&dd->pdev->dev, "Taskfile error\n"); in mtip_handle_tfe()
621 port = dd->port; in mtip_handle_tfe()
624 cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); in mtip_handle_tfe()
638 for (group = 0; group < dd->slot_groups; group++) { in mtip_handle_tfe()
641 dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed); in mtip_handle_tfe()
656 cmd = mtip_cmd_from_tag(dd, tag); in mtip_handle_tfe()
662 dev_err(&port->dd->pdev->dev, in mtip_handle_tfe()
665 if (mtip_check_surprise_removal(dd->pdev)) { in mtip_handle_tfe()
673 print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt); in mtip_handle_tfe()
680 rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ, in mtip_handle_tfe()
681 dd->port->log_buf, in mtip_handle_tfe()
682 dd->port->log_buf_dma, 1); in mtip_handle_tfe()
684 dev_warn(&dd->pdev->dev, in mtip_handle_tfe()
688 buf = (unsigned char *)dd->port->log_buf; in mtip_handle_tfe()
690 dev_info(&dd->pdev->dev, in mtip_handle_tfe()
692 set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag); in mtip_handle_tfe()
697 dev_info(&dd->pdev->dev, in mtip_handle_tfe()
699 set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag); in mtip_handle_tfe()
704 set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag); in mtip_handle_tfe()
705 dev_info(&dd->pdev->dev, in mtip_handle_tfe()
716 for (group = 0; group < dd->slot_groups; group++) { in mtip_handle_tfe()
720 cmd = mtip_cmd_from_tag(dd, tag); in mtip_handle_tfe()
732 dev_warn(&dd->pdev->dev, in mtip_handle_tfe()
762 dev_warn(&port->dd->pdev->dev, in mtip_handle_tfe()
768 dev_warn(&port->dd->pdev->dev, in mtip_handle_tfe()
773 print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt); in mtip_handle_tfe()
782 struct driver_data *dd = port->dd; in mtip_workq_sdbfx() local
802 command = mtip_cmd_from_tag(dd, tag); in mtip_workq_sdbfx()
806 dev_dbg(&dd->pdev->dev, in mtip_workq_sdbfx()
811 dd->pdev)) { in mtip_workq_sdbfx()
820 if (atomic_dec_return(&dd->irq_workers_active) == 0) in mtip_workq_sdbfx()
821 writel(0xffffffff, dd->mmio + HOST_IRQ_STAT); in mtip_workq_sdbfx()
827 static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat) in mtip_process_legacy() argument
829 struct mtip_port *port = dd->port; in mtip_process_legacy()
830 struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); in mtip_process_legacy()
847 static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat) in mtip_process_errors() argument
851 dev_warn(&dd->pdev->dev, in mtip_process_errors()
853 writel((1 << 26), dd->port->mmio + PORT_SCR_ERR); in mtip_process_errors()
857 dev_warn(&dd->pdev->dev, in mtip_process_errors()
859 writel((1 << 16), dd->port->mmio + PORT_SCR_ERR); in mtip_process_errors()
863 dev_warn(&dd->pdev->dev, in mtip_process_errors()
866 if (mtip_check_surprise_removal(dd->pdev)) in mtip_process_errors()
870 set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags); in mtip_process_errors()
871 wake_up_interruptible(&dd->port->svc_wait); in mtip_process_errors()
877 struct driver_data *dd = (struct driver_data *) data; in mtip_handle_irq() local
878 struct mtip_port *port = dd->port; in mtip_handle_irq()
884 hba_stat = readl(dd->mmio + HOST_IRQ_STAT); in mtip_handle_irq()
891 mtip_check_surprise_removal(dd->pdev); in mtip_handle_irq()
899 WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0); in mtip_handle_irq()
904 twork = &dd->work[i]; in mtip_handle_irq()
910 atomic_set(&dd->irq_workers_active, workers); in mtip_handle_irq()
913 twork = &dd->work[i]; in mtip_handle_irq()
917 dd->isr_workq, in mtip_handle_irq()
921 if (likely(dd->work[0].completed)) in mtip_handle_irq()
923 dd->work[0].completed); in mtip_handle_irq()
935 if (unlikely(mtip_check_surprise_removal(dd->pdev))) { in mtip_handle_irq()
940 &dd->dd_flag)) in mtip_handle_irq()
943 mtip_process_errors(dd, port_stat & PORT_IRQ_ERR); in mtip_handle_irq()
947 mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY); in mtip_handle_irq()
952 writel(hba_stat, dd->mmio + HOST_IRQ_STAT); in mtip_handle_irq()
969 struct driver_data *dd = instance; in mtip_irq_handler() local
971 return mtip_handle_irq(dd); in mtip_irq_handler()
1004 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag); in mtip_pause_ncq()
1005 clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag); in mtip_pause_ncq()
1033 blk_mq_stop_hw_queues(port->dd->queue); in mtip_quiesce_io()
1051 if (mtip_check_surprise_removal(port->dd->pdev)) in mtip_quiesce_io()
1059 for (n = 1; n < port->dd->slot_groups; n++) in mtip_quiesce_io()
1066 blk_mq_start_stopped_hw_queues(port->dd->queue, true); in mtip_quiesce_io()
1069 blk_mq_start_stopped_hw_queues(port->dd->queue, true); in mtip_quiesce_io()
1103 struct driver_data *dd = port->dd; in mtip_exec_internal_command() local
1109 dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n"); in mtip_exec_internal_command()
1113 int_cmd = mtip_get_int_command(dd); in mtip_exec_internal_command()
1131 dev_warn(&dd->pdev->dev, in mtip_exec_internal_command()
1133 mtip_put_int_command(dd, int_cmd); in mtip_exec_internal_command()
1185 dev_err(&dd->pdev->dev, in mtip_exec_internal_command()
1192 dev_err(&dd->pdev->dev, in mtip_exec_internal_command()
1196 dev_err(&dd->pdev->dev, in mtip_exec_internal_command()
1200 if (mtip_check_surprise_removal(dd->pdev) || in mtip_exec_internal_command()
1202 &dd->dd_flag)) { in mtip_exec_internal_command()
1203 dev_err(&dd->pdev->dev, in mtip_exec_internal_command()
1209 mtip_device_reset(dd); /* recover from timeout issue */ in mtip_exec_internal_command()
1221 if (mtip_check_surprise_removal(dd->pdev)) { in mtip_exec_internal_command()
1227 &dd->dd_flag)) { in mtip_exec_internal_command()
1236 dev_err(&dd->pdev->dev, in mtip_exec_internal_command()
1239 mtip_device_reset(dd); in mtip_exec_internal_command()
1244 hba_stat = readl(dd->mmio + HOST_IRQ_STAT); in mtip_exec_internal_command()
1247 dd->mmio + HOST_IRQ_STAT); in mtip_exec_internal_command()
1256 if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { in mtip_exec_internal_command()
1257 mtip_device_reset(dd); in mtip_exec_internal_command()
1263 mtip_put_int_command(dd, int_cmd); in mtip_exec_internal_command()
1294 static void mtip_set_timeout(struct driver_data *dd, in mtip_set_timeout() argument
1305 *timeout = ((*(dd->port->identify + 90) * 2) * 60000); in mtip_set_timeout()
1307 *timeout = ((*(dd->port->identify + 89) * 2) * 60000); in mtip_set_timeout()
1349 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag)) in mtip_get_identify()
1397 set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag); in mtip_get_identify()
1399 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag); in mtip_get_identify()
1404 port->dd->trim_supp = true; in mtip_get_identify()
1407 port->dd->trim_supp = false; in mtip_get_identify()
1448 mtip_set_timeout(port->dd, &fis, &timeout, 0); in mtip_standby_immediate()
1462 dev_warn(&port->dd->pdev->dev, in mtip_standby_immediate()
1563 dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n"); in mtip_get_smart_attr()
1567 dev_warn(&port->dd->pdev->dev, "SMART not supported\n"); in mtip_get_smart_attr()
1571 dev_warn(&port->dd->pdev->dev, "SMART not enabled\n"); in mtip_get_smart_attr()
1578 dev_warn(&port->dd->pdev->dev, "Failed to ge SMART data\n"); in mtip_get_smart_attr()
1590 dev_warn(&port->dd->pdev->dev, in mtip_get_smart_attr()
1610 static int mtip_send_trim(struct driver_data *dd, unsigned int lba, in mtip_send_trim() argument
1619 if (!len || dd->trim_supp == false) in mtip_send_trim()
1632 buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr, in mtip_send_trim()
1660 if (mtip_exec_internal_command(dd->port, in mtip_send_trim()
1670 dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr); in mtip_send_trim()
1684 static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors) in mtip_hw_get_capacity() argument
1686 struct mtip_port *port = dd->port; in mtip_hw_get_capacity()
1715 dev_info(&port->dd->pdev->dev, in mtip_dump_identify()
1719 dev_info(&port->dd->pdev->dev, in mtip_dump_identify()
1723 dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf); in mtip_dump_identify()
1725 dev_info(&port->dd->pdev->dev, "Security: %04x %s\n", in mtip_dump_identify()
1729 if (mtip_hw_get_capacity(port->dd, &sectors)) in mtip_dump_identify()
1730 dev_info(&port->dd->pdev->dev, in mtip_dump_identify()
1735 pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid); in mtip_dump_identify()
1747 dev_info(&port->dd->pdev->dev, in mtip_dump_identify()
1760 static inline void fill_command_sg(struct driver_data *dd, in fill_command_sg() argument
1774 dev_err(&dd->pdev->dev, in fill_command_sg()
1811 mtip_set_timeout(port->dd, &fis, &to, 0); in exec_drive_task()
1877 buf = dmam_alloc_coherent(&port->dd->pdev->dev, in exec_drive_command()
1882 dev_err(&port->dd->pdev->dev, in exec_drive_command()
1903 mtip_set_timeout(port->dd, &fis, &to, 0); in exec_drive_command()
1956 dmam_free_coherent(&port->dd->pdev->dev, in exec_drive_command()
2012 static int exec_drive_taskfile(struct driver_data *dd, in exec_drive_taskfile() argument
2053 outbuf_dma = pci_map_single(dd->pdev, in exec_drive_taskfile()
2075 inbuf_dma = pci_map_single(dd->pdev, in exec_drive_taskfile()
2089 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP); in exec_drive_taskfile()
2092 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP); in exec_drive_taskfile()
2095 reply = (dd->port->rxfis + RX_FIS_D2H_REG); in exec_drive_taskfile()
2137 dev_warn(&dd->pdev->dev, in exec_drive_taskfile()
2165 mtip_set_timeout(dd, &fis, &timeout, erasemode); in exec_drive_taskfile()
2174 if (mtip_exec_internal_command(dd->port, in exec_drive_taskfile()
2186 task_file_data = readl(dd->port->mmio+PORT_TFDATA); in exec_drive_taskfile()
2189 reply = dd->port->rxfis + RX_FIS_PIO_SETUP; in exec_drive_taskfile()
2192 reply = dd->port->rxfis + RX_FIS_D2H_REG; in exec_drive_taskfile()
2198 pci_unmap_single(dd->pdev, inbuf_dma, in exec_drive_taskfile()
2201 pci_unmap_single(dd->pdev, outbuf_dma, in exec_drive_taskfile()
2249 pci_unmap_single(dd->pdev, inbuf_dma, in exec_drive_taskfile()
2252 pci_unmap_single(dd->pdev, outbuf_dma, in exec_drive_taskfile()
2277 static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd, in mtip_hw_ioctl() argument
2283 if (copy_to_user((void __user *)arg, dd->port->identify, in mtip_hw_ioctl()
2299 if (exec_drive_command(dd->port, in mtip_hw_ioctl()
2323 if (exec_drive_task(dd->port, drive_command)) in mtip_hw_ioctl()
2344 ret = exec_drive_taskfile(dd, (void __user *) arg, in mtip_hw_ioctl()
2381 static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, in mtip_hw_submit_io() argument
2386 struct mtip_port *port = dd->port; in mtip_hw_submit_io()
2392 nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); in mtip_hw_submit_io()
2426 fill_command_sg(dd, command, nents); in mtip_hw_submit_io()
2441 command->comp_data = dd; in mtip_hw_submit_io()
2473 struct driver_data *dd = dev_to_disk(dev)->private_data; in mtip_hw_show_status() local
2476 if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag)) in mtip_hw_show_status()
2478 else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag)) in mtip_hw_show_status()
2493 struct driver_data *dd, *tmp; in show_device_status() local
2500 list_for_each_entry_safe(dd, tmp, &online_list, online_list) { in show_device_status()
2501 if (dd->pdev) { in show_device_status()
2502 if (dd->port && in show_device_status()
2503 dd->port->identify && in show_device_status()
2504 dd->port->identify_valid) { in show_device_status()
2506 (char *) (dd->port->identify + 10), 21); in show_device_status()
2507 status = *(dd->port->identify + 141); in show_device_status()
2513 if (dd->port && in show_device_status()
2514 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { in show_device_status()
2517 dev_name(&dd->pdev->dev), in show_device_status()
2523 dev_name(&dd->pdev->dev), in show_device_status()
2530 list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) { in show_device_status()
2531 if (dd->pdev) { in show_device_status()
2532 if (dd->port && in show_device_status()
2533 dd->port->identify && in show_device_status()
2534 dd->port->identify_valid) { in show_device_status()
2536 (char *) (dd->port->identify+10), 21); in show_device_status()
2537 status = *(dd->port->identify + 141); in show_device_status()
2543 if (dd->port && in show_device_status()
2544 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { in show_device_status()
2547 dev_name(&dd->pdev->dev), in show_device_status()
2553 dev_name(&dd->pdev->dev), in show_device_status()
2566 struct driver_data *dd = (struct driver_data *)f->private_data; in mtip_hw_read_device_status() local
2576 dev_err(&dd->pdev->dev, in mtip_hw_read_device_status()
2595 struct driver_data *dd = (struct driver_data *)f->private_data; in mtip_hw_read_registers() local
2606 dev_err(&dd->pdev->dev, in mtip_hw_read_registers()
2613 for (n = dd->slot_groups-1; n >= 0; n--) in mtip_hw_read_registers()
2615 readl(dd->port->s_active[n])); in mtip_hw_read_registers()
2620 for (n = dd->slot_groups-1; n >= 0; n--) in mtip_hw_read_registers()
2622 readl(dd->port->cmd_issue[n])); in mtip_hw_read_registers()
2627 for (n = dd->slot_groups-1; n >= 0; n--) in mtip_hw_read_registers()
2629 readl(dd->port->completed[n])); in mtip_hw_read_registers()
2633 readl(dd->port->mmio + PORT_IRQ_STAT)); in mtip_hw_read_registers()
2635 readl(dd->mmio + HOST_IRQ_STAT)); in mtip_hw_read_registers()
2640 for (n = dd->slot_groups-1; n >= 0; n--) { in mtip_hw_read_registers()
2643 dd->port->cmds_to_issue[n/2] >> (32*(n&1)); in mtip_hw_read_registers()
2645 group_allocated = dd->port->cmds_to_issue[n]; in mtip_hw_read_registers()
2662 struct driver_data *dd = (struct driver_data *)f->private_data; in mtip_hw_read_flags() local
2672 dev_err(&dd->pdev->dev, in mtip_hw_read_flags()
2678 dd->port->flags); in mtip_hw_read_flags()
2680 dd->dd_flag); in mtip_hw_read_flags()
2722 static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj) in mtip_hw_sysfs_init() argument
2724 if (!kobj || !dd) in mtip_hw_sysfs_init()
2728 dev_warn(&dd->pdev->dev, in mtip_hw_sysfs_init()
2743 static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj) in mtip_hw_sysfs_exit() argument
2745 if (!kobj || !dd) in mtip_hw_sysfs_exit()
2753 static int mtip_hw_debugfs_init(struct driver_data *dd) in mtip_hw_debugfs_init() argument
2758 dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent); in mtip_hw_debugfs_init()
2759 if (IS_ERR_OR_NULL(dd->dfs_node)) { in mtip_hw_debugfs_init()
2760 dev_warn(&dd->pdev->dev, in mtip_hw_debugfs_init()
2762 dd->disk->disk_name); in mtip_hw_debugfs_init()
2763 dd->dfs_node = NULL; in mtip_hw_debugfs_init()
2767 debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd, in mtip_hw_debugfs_init()
2769 debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd, in mtip_hw_debugfs_init()
2775 static void mtip_hw_debugfs_exit(struct driver_data *dd) in mtip_hw_debugfs_exit() argument
2777 if (dd->dfs_node) in mtip_hw_debugfs_exit()
2778 debugfs_remove_recursive(dd->dfs_node); in mtip_hw_debugfs_exit()
2789 static inline void hba_setup(struct driver_data *dd) in hba_setup() argument
2792 hwdata = readl(dd->mmio + HOST_HSORG); in hba_setup()
2798 dd->mmio + HOST_HSORG); in hba_setup()
2801 static int mtip_device_unaligned_constrained(struct driver_data *dd) in mtip_device_unaligned_constrained() argument
2803 return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0); in mtip_device_unaligned_constrained()
2816 static void mtip_detect_product(struct driver_data *dd) in mtip_detect_product() argument
2828 hwdata = readl(dd->mmio + HOST_HSORG); in mtip_detect_product()
2830 dd->product_type = MTIP_PRODUCT_UNKNOWN; in mtip_detect_product()
2831 dd->slot_groups = 1; in mtip_detect_product()
2834 dd->product_type = MTIP_PRODUCT_ASICFPGA; in mtip_detect_product()
2837 dev_info(&dd->pdev->dev, in mtip_detect_product()
2845 dev_warn(&dd->pdev->dev, in mtip_detect_product()
2850 dd->slot_groups = slotgroups; in mtip_detect_product()
2854 dev_warn(&dd->pdev->dev, "Unrecognized product id\n"); in mtip_detect_product()
2866 static int mtip_ftl_rebuild_poll(struct driver_data *dd) in mtip_ftl_rebuild_poll() argument
2870 dev_warn(&dd->pdev->dev, in mtip_ftl_rebuild_poll()
2878 &dd->dd_flag))) in mtip_ftl_rebuild_poll()
2880 if (mtip_check_surprise_removal(dd->pdev)) in mtip_ftl_rebuild_poll()
2883 if (mtip_get_identify(dd->port, NULL) < 0) in mtip_ftl_rebuild_poll()
2886 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == in mtip_ftl_rebuild_poll()
2891 dev_warn(&dd->pdev->dev, in mtip_ftl_rebuild_poll()
2897 dev_warn(&dd->pdev->dev, in mtip_ftl_rebuild_poll()
2900 mtip_block_initialize(dd); in mtip_ftl_rebuild_poll()
2906 dev_err(&dd->pdev->dev, in mtip_ftl_rebuild_poll()
2915 struct driver_data *dd = rq->q->queuedata; in mtip_softirq_done_fn() local
2918 dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, in mtip_softirq_done_fn()
2922 up(&dd->port->cmd_slot_unal); in mtip_softirq_done_fn()
2930 struct driver_data *dd = data; in mtip_abort_cmd() local
2934 clear_bit(req->tag, dd->port->cmds_to_issue); in mtip_abort_cmd()
2942 struct driver_data *dd = data; in mtip_queue_cmd() local
2944 set_bit(req->tag, dd->port->cmds_to_issue); in mtip_queue_cmd()
2959 struct driver_data *dd = (struct driver_data *)data; in mtip_service_thread() local
2961 unsigned int num_cmd_slots = dd->slot_groups * 32; in mtip_service_thread()
2962 struct mtip_port *port = dd->port; in mtip_service_thread()
2982 &dd->dd_flag))) in mtip_service_thread()
2990 mtip_handle_tfe(dd); in mtip_service_thread()
3002 } while (atomic_read(&dd->irq_workers_active) != 0 && in mtip_service_thread()
3005 if (atomic_read(&dd->irq_workers_active) != 0) in mtip_service_thread()
3006 dev_warn(&dd->pdev->dev, in mtip_service_thread()
3009 spin_lock(dd->queue->queue_lock); in mtip_service_thread()
3010 blk_mq_all_tag_busy_iter(*dd->tags.tags, in mtip_service_thread()
3011 mtip_queue_cmd, dd); in mtip_service_thread()
3012 spin_unlock(dd->queue->queue_lock); in mtip_service_thread()
3014 set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags); in mtip_service_thread()
3016 if (mtip_device_reset(dd)) in mtip_service_thread()
3017 blk_mq_all_tag_busy_iter(*dd->tags.tags, in mtip_service_thread()
3018 mtip_abort_cmd, dd); in mtip_service_thread()
3020 clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags); in mtip_service_thread()
3055 if (mtip_ftl_rebuild_poll(dd) == 0) in mtip_service_thread()
3072 static void mtip_dma_free(struct driver_data *dd) in mtip_dma_free() argument
3074 struct mtip_port *port = dd->port; in mtip_dma_free()
3077 dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, in mtip_dma_free()
3081 dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, in mtip_dma_free()
3094 static int mtip_dma_alloc(struct driver_data *dd) in mtip_dma_alloc() argument
3096 struct mtip_port *port = dd->port; in mtip_dma_alloc()
3100 dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, in mtip_dma_alloc()
3108 dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, in mtip_dma_alloc()
3111 dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, in mtip_dma_alloc()
3132 static int mtip_hw_get_identify(struct driver_data *dd) in mtip_hw_get_identify() argument
3138 if (mtip_get_identify(dd->port, NULL) < 0) in mtip_hw_get_identify()
3141 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == in mtip_hw_get_identify()
3143 set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags); in mtip_hw_get_identify()
3146 mtip_dump_identify(dd->port); in mtip_hw_get_identify()
3149 rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ, in mtip_hw_get_identify()
3150 dd->port->log_buf, in mtip_hw_get_identify()
3151 dd->port->log_buf_dma, 1); in mtip_hw_get_identify()
3153 dev_warn(&dd->pdev->dev, in mtip_hw_get_identify()
3157 buf = (unsigned char *)dd->port->log_buf; in mtip_hw_get_identify()
3159 dev_info(&dd->pdev->dev, in mtip_hw_get_identify()
3161 set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag); in mtip_hw_get_identify()
3164 dev_info(&dd->pdev->dev, in mtip_hw_get_identify()
3166 set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag); in mtip_hw_get_identify()
3169 dev_info(&dd->pdev->dev, in mtip_hw_get_identify()
3171 set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag); in mtip_hw_get_identify()
3177 if (mtip_get_smart_attr(dd->port, 242, &attr242)) in mtip_hw_get_identify()
3178 dev_warn(&dd->pdev->dev, in mtip_hw_get_identify()
3181 dev_info(&dd->pdev->dev, in mtip_hw_get_identify()
3196 static int mtip_hw_init(struct driver_data *dd) in mtip_hw_init() argument
3203 dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR]; in mtip_hw_init()
3205 mtip_detect_product(dd); in mtip_hw_init()
3206 if (dd->product_type == MTIP_PRODUCT_UNKNOWN) { in mtip_hw_init()
3210 num_command_slots = dd->slot_groups * 32; in mtip_hw_init()
3212 hba_setup(dd); in mtip_hw_init()
3214 dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL, in mtip_hw_init()
3215 dd->numa_node); in mtip_hw_init()
3216 if (!dd->port) { in mtip_hw_init()
3217 dev_err(&dd->pdev->dev, in mtip_hw_init()
3224 dd->work[i].port = dd->port; in mtip_hw_init()
3227 if (mtip_device_unaligned_constrained(dd)) in mtip_hw_init()
3228 dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS; in mtip_hw_init()
3230 dd->unal_qdepth = 0; in mtip_hw_init()
3232 sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth); in mtip_hw_init()
3236 spin_lock_init(&dd->port->cmd_issue_lock[i]); in mtip_hw_init()
3239 dd->port->mmio = dd->mmio + PORT_OFFSET; in mtip_hw_init()
3240 dd->port->dd = dd; in mtip_hw_init()
3243 rv = mtip_dma_alloc(dd); in mtip_hw_init()
3248 for (i = 0; i < dd->slot_groups; i++) { in mtip_hw_init()
3249 dd->port->s_active[i] = in mtip_hw_init()
3250 dd->port->mmio + i*0x80 + PORT_SCR_ACT; in mtip_hw_init()
3251 dd->port->cmd_issue[i] = in mtip_hw_init()
3252 dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE; in mtip_hw_init()
3253 dd->port->completed[i] = in mtip_hw_init()
3254 dd->port->mmio + i*0x80 + PORT_SDBV; in mtip_hw_init()
3259 while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) && in mtip_hw_init()
3263 if (unlikely(mtip_check_surprise_removal(dd->pdev))) { in mtip_hw_init()
3265 dev_warn(&dd->pdev->dev, in mtip_hw_init()
3271 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) { in mtip_hw_init()
3273 dev_warn(&dd->pdev->dev, in mtip_hw_init()
3281 if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) { in mtip_hw_init()
3282 if (mtip_hba_reset(dd) < 0) { in mtip_hw_init()
3283 dev_err(&dd->pdev->dev, in mtip_hw_init()
3290 writel(readl(dd->mmio + HOST_IRQ_STAT), in mtip_hw_init()
3291 dd->mmio + HOST_IRQ_STAT); in mtip_hw_init()
3294 mtip_init_port(dd->port); in mtip_hw_init()
3295 mtip_start_port(dd->port); in mtip_hw_init()
3298 rv = devm_request_irq(&dd->pdev->dev, in mtip_hw_init()
3299 dd->pdev->irq, in mtip_hw_init()
3302 dev_driver_string(&dd->pdev->dev), in mtip_hw_init()
3303 dd); in mtip_hw_init()
3306 dev_err(&dd->pdev->dev, in mtip_hw_init()
3307 "Unable to allocate IRQ %d\n", dd->pdev->irq); in mtip_hw_init()
3310 irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding)); in mtip_hw_init()
3313 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, in mtip_hw_init()
3314 dd->mmio + HOST_CTL); in mtip_hw_init()
3316 init_waitqueue_head(&dd->port->svc_wait); in mtip_hw_init()
3318 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { in mtip_hw_init()
3327 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN, in mtip_hw_init()
3328 dd->mmio + HOST_CTL); in mtip_hw_init()
3331 irq_set_affinity_hint(dd->pdev->irq, NULL); in mtip_hw_init()
3332 devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd); in mtip_hw_init()
3335 mtip_deinit_port(dd->port); in mtip_hw_init()
3336 mtip_dma_free(dd); in mtip_hw_init()
3340 kfree(dd->port); in mtip_hw_init()
3345 static int mtip_standby_drive(struct driver_data *dd) in mtip_standby_drive() argument
3349 if (dd->sr || !dd->port) in mtip_standby_drive()
3355 if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) && in mtip_standby_drive()
3356 !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) && in mtip_standby_drive()
3357 !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) { in mtip_standby_drive()
3358 rv = mtip_standby_immediate(dd->port); in mtip_standby_drive()
3360 dev_warn(&dd->pdev->dev, in mtip_standby_drive()
3374 static int mtip_hw_exit(struct driver_data *dd) in mtip_hw_exit() argument
3376 if (!dd->sr) { in mtip_hw_exit()
3378 mtip_deinit_port(dd->port); in mtip_hw_exit()
3381 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN, in mtip_hw_exit()
3382 dd->mmio + HOST_CTL); in mtip_hw_exit()
3386 irq_set_affinity_hint(dd->pdev->irq, NULL); in mtip_hw_exit()
3387 devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd); in mtip_hw_exit()
3391 mtip_dma_free(dd); in mtip_hw_exit()
3394 kfree(dd->port); in mtip_hw_exit()
3395 dd->port = NULL; in mtip_hw_exit()
3411 static int mtip_hw_shutdown(struct driver_data *dd) in mtip_hw_shutdown() argument
3417 mtip_standby_drive(dd); in mtip_hw_shutdown()
3434 static int mtip_hw_suspend(struct driver_data *dd) in mtip_hw_suspend() argument
3440 if (mtip_standby_drive(dd) != 0) { in mtip_hw_suspend()
3441 dev_err(&dd->pdev->dev, in mtip_hw_suspend()
3447 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN, in mtip_hw_suspend()
3448 dd->mmio + HOST_CTL); in mtip_hw_suspend()
3449 mtip_deinit_port(dd->port); in mtip_hw_suspend()
3466 static int mtip_hw_resume(struct driver_data *dd) in mtip_hw_resume() argument
3469 hba_setup(dd); in mtip_hw_resume()
3472 if (mtip_hba_reset(dd) != 0) { in mtip_hw_resume()
3473 dev_err(&dd->pdev->dev, in mtip_hw_resume()
3482 mtip_init_port(dd->port); in mtip_hw_resume()
3483 mtip_start_port(dd->port); in mtip_hw_resume()
3486 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, in mtip_hw_resume()
3487 dd->mmio + HOST_CTL); in mtip_hw_resume()
3541 struct driver_data *dd = dev->bd_disk->private_data; in mtip_block_ioctl() local
3546 if (!dd) in mtip_block_ioctl()
3549 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) in mtip_block_ioctl()
3556 return mtip_hw_ioctl(dd, cmd, arg); in mtip_block_ioctl()
3579 struct driver_data *dd = dev->bd_disk->private_data; in mtip_block_compat_ioctl() local
3584 if (!dd) in mtip_block_compat_ioctl()
3587 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) in mtip_block_compat_ioctl()
3616 ret = exec_drive_taskfile(dd, (void __user *) arg, in mtip_block_compat_ioctl()
3633 return mtip_hw_ioctl(dd, cmd, arg); in mtip_block_compat_ioctl()
3659 struct driver_data *dd = dev->bd_disk->private_data; in mtip_block_getgeo() local
3662 if (!dd) in mtip_block_getgeo()
3665 if (!(mtip_hw_get_capacity(dd, &capacity))) { in mtip_block_getgeo()
3666 dev_warn(&dd->pdev->dev, in mtip_block_getgeo()
3680 struct driver_data *dd; in mtip_block_open() local
3683 dd = (struct driver_data *) dev->bd_disk->private_data; in mtip_block_open()
3685 if (dd) { in mtip_block_open()
3687 &dd->dd_flag)) { in mtip_block_open()
3717 static inline bool is_se_active(struct driver_data *dd) in is_se_active() argument
3719 if (unlikely(test_bit(MTIP_PF_SE_ACTIVE_BIT, &dd->port->flags))) { in is_se_active()
3720 if (dd->port->ic_pause_timer) { in is_se_active()
3721 unsigned long to = dd->port->ic_pause_timer + in is_se_active()
3725 &dd->port->flags); in is_se_active()
3726 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag); in is_se_active()
3727 dd->port->ic_pause_timer = 0; in is_se_active()
3728 wake_up_interruptible(&dd->port->svc_wait); in is_se_active()
3750 struct driver_data *dd = hctx->queue->queuedata; in mtip_submit_request() local
3754 if (is_se_active(dd)) in mtip_submit_request()
3757 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { in mtip_submit_request()
3759 &dd->dd_flag))) { in mtip_submit_request()
3762 if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) { in mtip_submit_request()
3766 &dd->dd_flag) && in mtip_submit_request()
3770 if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) || in mtip_submit_request()
3771 test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))) in mtip_submit_request()
3778 err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq)); in mtip_submit_request()
3787 mtip_hw_submit_io(dd, rq, cmd, nents, hctx); in mtip_submit_request()
3794 struct driver_data *dd = hctx->queue->queuedata; in mtip_check_unal_depth() local
3797 if (rq_data_dir(rq) == READ || !dd->unal_qdepth) in mtip_check_unal_depth()
3809 if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal)) in mtip_check_unal_depth()
3837 struct driver_data *dd = data; in mtip_free_cmd() local
3843 dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, in mtip_free_cmd()
3850 struct driver_data *dd = data; in mtip_init_cmd() local
3852 u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64; in mtip_init_cmd()
3862 cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, in mtip_init_cmd()
3870 cmd->command_header = dd->port->command_list + in mtip_init_cmd()
3872 cmd->command_header_dma = dd->port->command_list_dma + in mtip_init_cmd()
3887 struct driver_data *dd = req->q->queuedata; in mtip_cmd_timeout() local
3893 if (test_bit(req->tag, dd->port->cmds_to_issue)) in mtip_cmd_timeout()
3896 if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags)) in mtip_cmd_timeout()
3899 wake_up_interruptible(&dd->port->svc_wait); in mtip_cmd_timeout()
3924 static int mtip_block_initialize(struct driver_data *dd) in mtip_block_initialize() argument
3931 if (dd->disk) in mtip_block_initialize()
3934 if (mtip_hw_init(dd)) { in mtip_block_initialize()
3939 dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node); in mtip_block_initialize()
3940 if (dd->disk == NULL) { in mtip_block_initialize()
3941 dev_err(&dd->pdev->dev, in mtip_block_initialize()
3962 dd->disk->disk_name, in mtip_block_initialize()
3967 dd->disk->driverfs_dev = &dd->pdev->dev; in mtip_block_initialize()
3968 dd->disk->major = dd->major; in mtip_block_initialize()
3969 dd->disk->first_minor = index * MTIP_MAX_MINORS; in mtip_block_initialize()
3970 dd->disk->minors = MTIP_MAX_MINORS; in mtip_block_initialize()
3971 dd->disk->fops = &mtip_block_ops; in mtip_block_initialize()
3972 dd->disk->private_data = dd; in mtip_block_initialize()
3973 dd->index = index; in mtip_block_initialize()
3975 mtip_hw_debugfs_init(dd); in mtip_block_initialize()
3977 memset(&dd->tags, 0, sizeof(dd->tags)); in mtip_block_initialize()
3978 dd->tags.ops = &mtip_mq_ops; in mtip_block_initialize()
3979 dd->tags.nr_hw_queues = 1; in mtip_block_initialize()
3980 dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS; in mtip_block_initialize()
3981 dd->tags.reserved_tags = 1; in mtip_block_initialize()
3982 dd->tags.cmd_size = sizeof(struct mtip_cmd); in mtip_block_initialize()
3983 dd->tags.numa_node = dd->numa_node; in mtip_block_initialize()
3984 dd->tags.flags = BLK_MQ_F_SHOULD_MERGE; in mtip_block_initialize()
3985 dd->tags.driver_data = dd; in mtip_block_initialize()
3986 dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS; in mtip_block_initialize()
3988 rv = blk_mq_alloc_tag_set(&dd->tags); in mtip_block_initialize()
3990 dev_err(&dd->pdev->dev, in mtip_block_initialize()
3996 dd->queue = blk_mq_init_queue(&dd->tags); in mtip_block_initialize()
3997 if (IS_ERR(dd->queue)) { in mtip_block_initialize()
3998 dev_err(&dd->pdev->dev, in mtip_block_initialize()
4004 dd->disk->queue = dd->queue; in mtip_block_initialize()
4005 dd->queue->queuedata = dd; in mtip_block_initialize()
4009 wait_for_rebuild = mtip_hw_get_identify(dd); in mtip_block_initialize()
4011 dev_err(&dd->pdev->dev, in mtip_block_initialize()
4025 set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags); in mtip_block_initialize()
4026 clear_bit(QUEUE_FLAG_ADD_RANDOM, &dd->queue->queue_flags); in mtip_block_initialize()
4027 blk_queue_max_segments(dd->queue, MTIP_MAX_SG); in mtip_block_initialize()
4028 blk_queue_physical_block_size(dd->queue, 4096); in mtip_block_initialize()
4029 blk_queue_max_hw_sectors(dd->queue, 0xffff); in mtip_block_initialize()
4030 blk_queue_max_segment_size(dd->queue, 0x400000); in mtip_block_initialize()
4031 blk_queue_io_min(dd->queue, 4096); in mtip_block_initialize()
4032 blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask); in mtip_block_initialize()
4038 blk_queue_flush(dd->queue, 0); in mtip_block_initialize()
4041 if (dd->trim_supp == true) { in mtip_block_initialize()
4042 set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags); in mtip_block_initialize()
4043 dd->queue->limits.discard_granularity = 4096; in mtip_block_initialize()
4044 blk_queue_max_discard_sectors(dd->queue, in mtip_block_initialize()
4046 dd->queue->limits.discard_zeroes_data = 0; in mtip_block_initialize()
4050 if (!(mtip_hw_get_capacity(dd, &capacity))) { in mtip_block_initialize()
4051 dev_warn(&dd->pdev->dev, in mtip_block_initialize()
4056 set_capacity(dd->disk, capacity); in mtip_block_initialize()
4059 add_disk(dd->disk); in mtip_block_initialize()
4061 dd->bdev = bdget_disk(dd->disk, 0); in mtip_block_initialize()
4066 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); in mtip_block_initialize()
4068 mtip_hw_sysfs_init(dd, kobj); in mtip_block_initialize()
4072 if (dd->mtip_svc_handler) { in mtip_block_initialize()
4073 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); in mtip_block_initialize()
4078 dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, in mtip_block_initialize()
4079 dd, dd->numa_node, in mtip_block_initialize()
4082 if (IS_ERR(dd->mtip_svc_handler)) { in mtip_block_initialize()
4083 dev_err(&dd->pdev->dev, "service thread failed to start\n"); in mtip_block_initialize()
4084 dd->mtip_svc_handler = NULL; in mtip_block_initialize()
4088 wake_up_process(dd->mtip_svc_handler); in mtip_block_initialize()
4095 bdput(dd->bdev); in mtip_block_initialize()
4096 dd->bdev = NULL; in mtip_block_initialize()
4099 del_gendisk(dd->disk); in mtip_block_initialize()
4103 blk_cleanup_queue(dd->queue); in mtip_block_initialize()
4105 blk_mq_free_tag_set(&dd->tags); in mtip_block_initialize()
4107 mtip_hw_debugfs_exit(dd); in mtip_block_initialize()
4114 put_disk(dd->disk); in mtip_block_initialize()
4117 mtip_hw_exit(dd); /* De-initialize the protocol layer. */ in mtip_block_initialize()
4125 struct driver_data *dd = (struct driver_data *)data; in mtip_no_dev_cleanup() local
4130 else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) { in mtip_no_dev_cleanup()
4132 cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); in mtip_no_dev_cleanup()
4134 cmd->comp_func(dd->port, MTIP_TAG_INTERNAL, in mtip_no_dev_cleanup()
4149 static int mtip_block_remove(struct driver_data *dd) in mtip_block_remove() argument
4153 mtip_hw_debugfs_exit(dd); in mtip_block_remove()
4155 if (dd->mtip_svc_handler) { in mtip_block_remove()
4156 set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags); in mtip_block_remove()
4157 wake_up_interruptible(&dd->port->svc_wait); in mtip_block_remove()
4158 kthread_stop(dd->mtip_svc_handler); in mtip_block_remove()
4162 if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) { in mtip_block_remove()
4163 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); in mtip_block_remove()
4165 mtip_hw_sysfs_exit(dd, kobj); in mtip_block_remove()
4170 if (!dd->sr) { in mtip_block_remove()
4175 if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS, in mtip_block_remove()
4177 mtip_standby_drive(dd); in mtip_block_remove()
4180 dev_info(&dd->pdev->dev, "device %s surprise removal\n", in mtip_block_remove()
4181 dd->disk->disk_name); in mtip_block_remove()
4183 blk_mq_freeze_queue_start(dd->queue); in mtip_block_remove()
4184 blk_mq_stop_hw_queues(dd->queue); in mtip_block_remove()
4185 blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd); in mtip_block_remove()
4191 if (dd->bdev) { in mtip_block_remove()
4192 bdput(dd->bdev); in mtip_block_remove()
4193 dd->bdev = NULL; in mtip_block_remove()
4195 if (dd->disk) { in mtip_block_remove()
4196 if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) in mtip_block_remove()
4197 del_gendisk(dd->disk); in mtip_block_remove()
4198 if (dd->disk->queue) { in mtip_block_remove()
4199 blk_cleanup_queue(dd->queue); in mtip_block_remove()
4200 blk_mq_free_tag_set(&dd->tags); in mtip_block_remove()
4201 dd->queue = NULL; in mtip_block_remove()
4203 put_disk(dd->disk); in mtip_block_remove()
4205 dd->disk = NULL; in mtip_block_remove()
4208 ida_remove(&rssd_index_ida, dd->index); in mtip_block_remove()
4212 mtip_hw_exit(dd); in mtip_block_remove()
4229 static int mtip_block_shutdown(struct driver_data *dd) in mtip_block_shutdown() argument
4231 mtip_hw_shutdown(dd); in mtip_block_shutdown()
4234 if (dd->disk) { in mtip_block_shutdown()
4235 dev_info(&dd->pdev->dev, in mtip_block_shutdown()
4236 "Shutting down %s ...\n", dd->disk->disk_name); in mtip_block_shutdown()
4238 if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) in mtip_block_shutdown()
4239 del_gendisk(dd->disk); in mtip_block_shutdown()
4240 if (dd->disk->queue) { in mtip_block_shutdown()
4241 blk_cleanup_queue(dd->queue); in mtip_block_shutdown()
4242 blk_mq_free_tag_set(&dd->tags); in mtip_block_shutdown()
4244 put_disk(dd->disk); in mtip_block_shutdown()
4245 dd->disk = NULL; in mtip_block_shutdown()
4246 dd->queue = NULL; in mtip_block_shutdown()
4250 ida_remove(&rssd_index_ida, dd->index); in mtip_block_shutdown()
4255 static int mtip_block_suspend(struct driver_data *dd) in mtip_block_suspend() argument
4257 dev_info(&dd->pdev->dev, in mtip_block_suspend()
4258 "Suspending %s ...\n", dd->disk->disk_name); in mtip_block_suspend()
4259 mtip_hw_suspend(dd); in mtip_block_suspend()
4263 static int mtip_block_resume(struct driver_data *dd) in mtip_block_resume() argument
4265 dev_info(&dd->pdev->dev, "Resuming %s ...\n", in mtip_block_resume()
4266 dd->disk->disk_name); in mtip_block_resume()
4267 mtip_hw_resume(dd); in mtip_block_resume()
4321 static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev) in mtip_disable_link_opts() argument
4333 dev_info(&dd->pdev->dev, in mtip_disable_link_opts()
4345 static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev) in mtip_fix_ero_nosnoop() argument
4354 mtip_disable_link_opts(dd, pdev->bus->self); in mtip_fix_ero_nosnoop()
4365 mtip_disable_link_opts(dd, in mtip_fix_ero_nosnoop()
4385 struct driver_data *dd = NULL; in mtip_pci_probe() local
4405 dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node); in mtip_pci_probe()
4406 if (dd == NULL) { in mtip_pci_probe()
4413 pci_set_drvdata(pdev, dd); in mtip_pci_probe()
4443 dd->major = mtip_major; in mtip_pci_probe()
4444 dd->instance = instance; in mtip_pci_probe()
4445 dd->pdev = pdev; in mtip_pci_probe()
4446 dd->numa_node = my_node; in mtip_pci_probe()
4448 INIT_LIST_HEAD(&dd->online_list); in mtip_pci_probe()
4449 INIT_LIST_HEAD(&dd->remove_list); in mtip_pci_probe()
4451 memset(dd->workq_name, 0, 32); in mtip_pci_probe()
4452 snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); in mtip_pci_probe()
4454 dd->isr_workq = create_workqueue(dd->workq_name); in mtip_pci_probe()
4455 if (!dd->isr_workq) { in mtip_pci_probe()
4456 dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance); in mtip_pci_probe()
4463 node_mask = cpumask_of_node(dd->numa_node); in mtip_pci_probe()
4472 dd->numa_node, in mtip_pci_probe()
4474 nr_cpus_node(dd->numa_node), in mtip_pci_probe()
4479 dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node); in mtip_pci_probe()
4481 cpu_to_node(dd->isr_binding), dd->isr_binding); in mtip_pci_probe()
4484 dd->work[0].cpu_binding = dd->isr_binding; in mtip_pci_probe()
4485 dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node); in mtip_pci_probe()
4486 dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node); in mtip_pci_probe()
4487 dd->work[3].cpu_binding = dd->work[0].cpu_binding; in mtip_pci_probe()
4488 dd->work[4].cpu_binding = dd->work[1].cpu_binding; in mtip_pci_probe()
4489 dd->work[5].cpu_binding = dd->work[2].cpu_binding; in mtip_pci_probe()
4490 dd->work[6].cpu_binding = dd->work[2].cpu_binding; in mtip_pci_probe()
4491 dd->work[7].cpu_binding = dd->work[1].cpu_binding; in mtip_pci_probe()
4497 if (dd->work[i].cpu_binding == cpu) { in mtip_pci_probe()
4506 INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0); in mtip_pci_probe()
4507 INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1); in mtip_pci_probe()
4508 INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2); in mtip_pci_probe()
4509 INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3); in mtip_pci_probe()
4510 INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4); in mtip_pci_probe()
4511 INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5); in mtip_pci_probe()
4512 INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6); in mtip_pci_probe()
4513 INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7); in mtip_pci_probe()
4523 mtip_fix_ero_nosnoop(dd, pdev); in mtip_pci_probe()
4526 rv = mtip_block_initialize(dd); in mtip_pci_probe()
4539 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); in mtip_pci_probe()
4545 list_add(&dd->online_list, &online_list); in mtip_pci_probe()
4554 if (dd->isr_workq) { in mtip_pci_probe()
4555 flush_workqueue(dd->isr_workq); in mtip_pci_probe()
4556 destroy_workqueue(dd->isr_workq); in mtip_pci_probe()
4557 drop_cpu(dd->work[0].cpu_binding); in mtip_pci_probe()
4558 drop_cpu(dd->work[1].cpu_binding); in mtip_pci_probe()
4559 drop_cpu(dd->work[2].cpu_binding); in mtip_pci_probe()
4565 kfree(dd); in mtip_pci_probe()
4581 struct driver_data *dd = pci_get_drvdata(pdev); in mtip_pci_remove() local
4584 set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag); in mtip_pci_remove()
4587 list_del_init(&dd->online_list); in mtip_pci_remove()
4588 list_add(&dd->remove_list, &removing_list); in mtip_pci_remove()
4592 synchronize_irq(dd->pdev->irq); in mtip_pci_remove()
4598 } while (atomic_read(&dd->irq_workers_active) != 0 && in mtip_pci_remove()
4601 if (!dd->sr) in mtip_pci_remove()
4602 fsync_bdev(dd->bdev); in mtip_pci_remove()
4604 if (atomic_read(&dd->irq_workers_active) != 0) { in mtip_pci_remove()
4605 dev_warn(&dd->pdev->dev, in mtip_pci_remove()
4609 blk_set_queue_dying(dd->queue); in mtip_pci_remove()
4610 set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); in mtip_pci_remove()
4613 mtip_block_remove(dd); in mtip_pci_remove()
4615 if (dd->isr_workq) { in mtip_pci_remove()
4616 flush_workqueue(dd->isr_workq); in mtip_pci_remove()
4617 destroy_workqueue(dd->isr_workq); in mtip_pci_remove()
4618 drop_cpu(dd->work[0].cpu_binding); in mtip_pci_remove()
4619 drop_cpu(dd->work[1].cpu_binding); in mtip_pci_remove()
4620 drop_cpu(dd->work[2].cpu_binding); in mtip_pci_remove()
4626 list_del_init(&dd->remove_list); in mtip_pci_remove()
4629 kfree(dd); in mtip_pci_remove()
4645 struct driver_data *dd = pci_get_drvdata(pdev); in mtip_pci_suspend() local
4647 if (!dd) { in mtip_pci_suspend()
4653 set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag); in mtip_pci_suspend()
4656 rv = mtip_block_suspend(dd); in mtip_pci_suspend()
4686 struct driver_data *dd; in mtip_pci_resume() local
4688 dd = pci_get_drvdata(pdev); in mtip_pci_resume()
4689 if (!dd) { in mtip_pci_resume()
4714 rv = mtip_block_resume(dd); in mtip_pci_resume()
4719 clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag); in mtip_pci_resume()
4732 struct driver_data *dd = pci_get_drvdata(pdev); in mtip_pci_shutdown() local
4733 if (dd) in mtip_pci_shutdown()
4734 mtip_block_shutdown(dd); in mtip_pci_shutdown()