This source file includes following definitions.
- sdev_to_hba
- shost_to_hba
- hpsa_is_cmd_idle
- decode_sense_data
- check_for_unit_attention
- check_for_busy
- host_show_lockup_detected
- host_store_hp_ssd_smart_path_status
- host_store_raid_offload_debug
- host_store_rescan
- host_show_firmware_revision
- host_show_commands_outstanding
- host_show_transport_mode
- host_show_hp_ssd_smart_path_status
- board_id_in_array
- ctlr_is_hard_resettable
- ctlr_is_soft_resettable
- ctlr_is_resettable
- host_show_resettable
- is_logical_dev_addr_mode
- is_logical_device
- raid_level_show
- lunid_show
- unique_id_show
- sas_address_show
- host_show_hp_ssd_smart_path_enabled
- path_info_show
- host_show_ctlr_num
- host_show_legacy_board
- next_command
- set_performant_mode
- set_ioaccel1_performant_mode
- set_ioaccel2_tmf_performant_mode
- set_ioaccel2_performant_mode
- is_firmware_flash_cmd
- dial_down_lockup_detection_during_fw_flash
- dial_up_lockup_detection_on_fw_flash_complete
- __enqueue_cmd_and_start_io
- enqueue_cmd_and_start_io
- is_hba_lunid
- is_scsi_rev_5
- hpsa_find_target_lun
- hpsa_show_dev_msg
- hpsa_scsi_add_entry
- hpsa_scsi_update_entry
- hpsa_scsi_replace_entry
- hpsa_scsi_remove_entry
- fixup_botched_add
- device_is_the_same
- device_updated
- hpsa_scsi_find_entry
- hpsa_monitor_offline_device
- hpsa_show_volume_status
- hpsa_figure_phys_disk_ptrs
- hpsa_update_log_drive_phys_drive_ptrs
- hpsa_add_device
- hpsa_find_outstanding_commands_for_dev
- hpsa_wait_for_outstanding_commands_for_dev
- hpsa_remove_device
- adjust_hpsa_scsi_table
- lookup_hpsa_scsi_dev
- hpsa_slave_alloc
- hpsa_slave_configure
- hpsa_slave_destroy
- hpsa_free_ioaccel2_sg_chain_blocks
- hpsa_allocate_ioaccel2_sg_chain_blocks
- hpsa_free_sg_chain_blocks
- hpsa_alloc_sg_chain_blocks
- hpsa_map_ioaccel2_sg_chain_block
- hpsa_unmap_ioaccel2_sg_chain_block
- hpsa_map_sg_chain_block
- hpsa_unmap_sg_chain_block
- handle_ioaccel_mode2_error
- hpsa_cmd_resolve_events
- hpsa_cmd_resolve_and_free
- hpsa_cmd_free_and_done
- hpsa_retry_cmd
- process_ioaccel2_completion
- hpsa_evaluate_tmf_status
- complete_scsi_command
- hpsa_pci_unmap
- hpsa_map_one
- hpsa_scsi_do_simple_cmd_core
- hpsa_scsi_do_simple_cmd
- lockup_detected
- hpsa_scsi_do_simple_cmd_with_retry
- hpsa_print_cmd
- hpsa_scsi_interpret_error
- hpsa_do_receive_diagnostic
- hpsa_get_enclosure_logical_identifier
- hpsa_scsi_do_inquiry
- hpsa_send_reset
- hpsa_cmd_dev_match
- hpsa_do_reset
- hpsa_get_raid_level
- hpsa_debug_map_buff
- hpsa_debug_map_buff
- hpsa_get_raid_map
- hpsa_bmic_sense_subsystem_information
- hpsa_bmic_id_controller
- hpsa_bmic_id_physical_device
- hpsa_get_enclosure_info
- hpsa_get_sas_address_from_report_physical
- hpsa_get_sas_address
- hpsa_ext_ctrl_present
- hpsa_vpd_page_supported
- hpsa_get_ioaccel_status
- hpsa_get_device_id
- hpsa_scsi_do_report_luns
- hpsa_scsi_do_report_phys_luns
- hpsa_scsi_do_report_log_luns
- hpsa_set_bus_target_lun
- hpsa_get_volume_status
- hpsa_volume_offline
- hpsa_update_device_info
- figure_bus_target_lun
- figure_external_status
- hpsa_gather_lun_info
- figure_lunaddrbytes
- hpsa_get_ioaccel_drive_info
- hpsa_get_path_info
- hpsa_set_local_logical_count
- hpsa_is_disk_spare
- hpsa_skip_device
- hpsa_update_scsi_devices
- hpsa_set_sg_descriptor
- hpsa_scatter_gather
- warn_zero_length_transfer
- is_zero_length_transfer
- fixup_ioaccel_cdb
- hpsa_scsi_ioaccel1_queue_command
- hpsa_scsi_ioaccel_direct_map
- set_encrypt_ioaccel2
- hpsa_scsi_ioaccel2_queue_command
- hpsa_scsi_ioaccel_queue_command
- raid_map_helper
- hpsa_scsi_ioaccel_raid_map
- hpsa_ciss_submit
- hpsa_cmd_init
- hpsa_preinitialize_commands
- hpsa_cmd_partial_init
- hpsa_ioaccel_submit
- hpsa_command_resubmit_worker
- hpsa_scsi_queue_command
- hpsa_scan_complete
- hpsa_scan_start
- hpsa_change_queue_depth
- hpsa_scan_finished
- hpsa_scsi_host_alloc
- hpsa_scsi_add_host
- hpsa_get_cmd_index
- hpsa_send_test_unit_ready
- hpsa_wait_for_test_unit_ready
- wait_for_device_to_become_ready
- hpsa_eh_device_reset_handler
- cmd_tagged_alloc
- cmd_tagged_free
- cmd_alloc
- cmd_free
- hpsa_ioctl32_passthru
- hpsa_ioctl32_big_passthru
- hpsa_compat_ioctl
- hpsa_getpciinfo_ioctl
- hpsa_getdrivver_ioctl
- hpsa_passthru_ioctl
- hpsa_big_passthru_ioctl
- check_ioctl_unit_attention
- hpsa_ioctl
- hpsa_send_host_reset
- fill_cmd
- remap_pci_mem
- get_next_completion
- interrupt_pending
- interrupt_not_for_us
- bad_tag
- finish_cmd
- process_indexed_cmd
- ignore_bogus_interrupt
- queue_to_hba
- hpsa_intx_discard_completions
- hpsa_msix_discard_completions
- do_hpsa_intr_intx
- do_hpsa_intr_msi
- hpsa_message
- hpsa_controller_hard_reset
- init_driver_version
- write_driver_ver_to_cfgtable
- read_driver_ver_from_cfgtable
- controller_reset_failed
- hpsa_kdump_hard_reset_controller
- print_cfg_table
- find_PCI_BAR_index
- hpsa_disable_interrupt_mode
- hpsa_setup_reply_map
- hpsa_interrupt_mode
- hpsa_lookup_board_id
- hpsa_pci_find_memory_BAR
- hpsa_wait_for_board_state
- hpsa_find_cfg_addrs
- hpsa_free_cfgtables
- hpsa_find_cfgtables
- hpsa_get_max_perf_mode_cmds
- hpsa_supports_chained_sg_blocks
- hpsa_find_board_params
- hpsa_CISS_signature_present
- hpsa_set_driver_support_bits
- hpsa_p600_dma_prefetch_quirk
- hpsa_wait_for_clear_event_notify_ack
- hpsa_wait_for_mode_change_ack
- hpsa_enter_simple_mode
- hpsa_free_pci_init
- hpsa_pci_init
- hpsa_hba_inquiry
- hpsa_init_reset_devices
- hpsa_free_cmd_pool
- hpsa_alloc_cmd_pool
- hpsa_free_irqs
- hpsa_request_irqs
- hpsa_kdump_soft_reset
- hpsa_free_reply_queues
- hpsa_undo_allocations_after_kdump_soft_reset
- fail_all_outstanding_cmds
- set_lockup_detected_for_all_cpus
- controller_lockup_detected
- detect_controller_lockup
- hpsa_set_ioaccel_status
- hpsa_ack_ctlr_events
- hpsa_ctlr_needs_rescan
- hpsa_offline_devices_ready
- hpsa_luns_changed
- hpsa_perform_rescan
- hpsa_event_monitor_worker
- hpsa_rescan_ctlr_worker
- hpsa_monitor_ctlr_worker
- hpsa_create_controller_wq
- hpda_free_ctlr_info
- hpda_alloc_ctlr_info
- hpsa_init_one
- hpsa_flush_cache
- hpsa_disable_rld_caching
- __hpsa_shutdown
- hpsa_shutdown
- hpsa_free_device_info
- hpsa_remove_one
- hpsa_suspend
- hpsa_resume
- calc_bucket_map
- hpsa_enter_performant_mode
- hpsa_free_ioaccel1_cmd_and_bft
- hpsa_alloc_ioaccel1_cmd_and_bft
- hpsa_free_ioaccel2_cmd_and_bft
- hpsa_alloc_ioaccel2_cmd_and_bft
- hpsa_free_performant_mode
- hpsa_put_ctlr_into_performant_mode
- is_accelerated_cmd
- hpsa_drain_accel_commands
- hpsa_alloc_sas_phy
- hpsa_free_sas_phy
- hpsa_sas_port_add_phy
- hpsa_sas_port_add_rphy
- hpsa_alloc_sas_port
- hpsa_free_sas_port
- hpsa_alloc_sas_node
- hpsa_free_sas_node
- hpsa_find_device_by_sas_rphy
- hpsa_add_sas_host
- hpsa_delete_sas_host
- hpsa_add_sas_device
- hpsa_remove_sas_device
- hpsa_sas_get_linkerrors
- hpsa_sas_get_enclosure_identifier
- hpsa_sas_get_bay_identifier
- hpsa_sas_phy_reset
- hpsa_sas_phy_enable
- hpsa_sas_phy_setup
- hpsa_sas_phy_release
- hpsa_sas_phy_speed
- hpsa_init
- hpsa_cleanup
- verify_offsets
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 #include <linux/module.h>
21 #include <linux/interrupt.h>
22 #include <linux/types.h>
23 #include <linux/pci.h>
24 #include <linux/kernel.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/fs.h>
28 #include <linux/timer.h>
29 #include <linux/init.h>
30 #include <linux/spinlock.h>
31 #include <linux/compat.h>
32 #include <linux/blktrace_api.h>
33 #include <linux/uaccess.h>
34 #include <linux/io.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/completion.h>
37 #include <linux/moduleparam.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_cmnd.h>
40 #include <scsi/scsi_device.h>
41 #include <scsi/scsi_host.h>
42 #include <scsi/scsi_tcq.h>
43 #include <scsi/scsi_eh.h>
44 #include <scsi/scsi_transport_sas.h>
45 #include <scsi/scsi_dbg.h>
46 #include <linux/cciss_ioctl.h>
47 #include <linux/string.h>
48 #include <linux/bitmap.h>
49 #include <linux/atomic.h>
50 #include <linux/jiffies.h>
51 #include <linux/percpu-defs.h>
52 #include <linux/percpu.h>
53 #include <asm/unaligned.h>
54 #include <asm/div64.h>
55 #include "hpsa_cmd.h"
56 #include "hpsa.h"
57
58
59
60
61
62 #define HPSA_DRIVER_VERSION "3.4.20-170"
63 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
64 #define HPSA "hpsa"
65
66
67 #define CLEAR_EVENT_WAIT_INTERVAL 20
68 #define MODE_CHANGE_WAIT_INTERVAL 10
69 #define MAX_CLEAR_EVENT_WAIT 30000
70 #define MAX_MODE_CHANGE_WAIT 2000
71 #define MAX_IOCTL_CONFIG_WAIT 1000
72
73
74 #define MAX_CMD_RETRIES 3
75
76 #define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)
77
78
79 MODULE_AUTHOR("Hewlett-Packard Company");
80 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
81 HPSA_DRIVER_VERSION);
82 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
83 MODULE_VERSION(HPSA_DRIVER_VERSION);
84 MODULE_LICENSE("GPL");
85 MODULE_ALIAS("cciss");
86
87 static int hpsa_simple_mode;
88 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
89 MODULE_PARM_DESC(hpsa_simple_mode,
90 "Use 'simple mode' rather than 'performant mode'");
91
92
93 static const struct pci_device_id hpsa_pci_device_id[] = {
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
111 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
112 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
113 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
114 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
115 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
116 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
117 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
118 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
119 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
120 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
121 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
122 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
123 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
124 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
125 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
126 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
127 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
128 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
129 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
130 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
131 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
132 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
133 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
134 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
135 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
136 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
137 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
138 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
139 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
140 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
141 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
142 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
143 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
144 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
145 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
146 {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
147 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
148 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
149 {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
150 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
151 {0,}
152 };
153
154 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
155
156
157
158
159
160 static struct board_type products[] = {
161 {0x40700E11, "Smart Array 5300", &SA5A_access},
162 {0x40800E11, "Smart Array 5i", &SA5B_access},
163 {0x40820E11, "Smart Array 532", &SA5B_access},
164 {0x40830E11, "Smart Array 5312", &SA5B_access},
165 {0x409A0E11, "Smart Array 641", &SA5A_access},
166 {0x409B0E11, "Smart Array 642", &SA5A_access},
167 {0x409C0E11, "Smart Array 6400", &SA5A_access},
168 {0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
169 {0x40910E11, "Smart Array 6i", &SA5A_access},
170 {0x3225103C, "Smart Array P600", &SA5A_access},
171 {0x3223103C, "Smart Array P800", &SA5A_access},
172 {0x3234103C, "Smart Array P400", &SA5A_access},
173 {0x3235103C, "Smart Array P400i", &SA5A_access},
174 {0x3211103C, "Smart Array E200i", &SA5A_access},
175 {0x3212103C, "Smart Array E200", &SA5A_access},
176 {0x3213103C, "Smart Array E200i", &SA5A_access},
177 {0x3214103C, "Smart Array E200i", &SA5A_access},
178 {0x3215103C, "Smart Array E200i", &SA5A_access},
179 {0x3237103C, "Smart Array E500", &SA5A_access},
180 {0x323D103C, "Smart Array P700m", &SA5A_access},
181 {0x3241103C, "Smart Array P212", &SA5_access},
182 {0x3243103C, "Smart Array P410", &SA5_access},
183 {0x3245103C, "Smart Array P410i", &SA5_access},
184 {0x3247103C, "Smart Array P411", &SA5_access},
185 {0x3249103C, "Smart Array P812", &SA5_access},
186 {0x324A103C, "Smart Array P712m", &SA5_access},
187 {0x324B103C, "Smart Array P711m", &SA5_access},
188 {0x3233103C, "HP StorageWorks 1210m", &SA5_access},
189 {0x3350103C, "Smart Array P222", &SA5_access},
190 {0x3351103C, "Smart Array P420", &SA5_access},
191 {0x3352103C, "Smart Array P421", &SA5_access},
192 {0x3353103C, "Smart Array P822", &SA5_access},
193 {0x3354103C, "Smart Array P420i", &SA5_access},
194 {0x3355103C, "Smart Array P220i", &SA5_access},
195 {0x3356103C, "Smart Array P721m", &SA5_access},
196 {0x1920103C, "Smart Array P430i", &SA5_access},
197 {0x1921103C, "Smart Array P830i", &SA5_access},
198 {0x1922103C, "Smart Array P430", &SA5_access},
199 {0x1923103C, "Smart Array P431", &SA5_access},
200 {0x1924103C, "Smart Array P830", &SA5_access},
201 {0x1925103C, "Smart Array P831", &SA5_access},
202 {0x1926103C, "Smart Array P731m", &SA5_access},
203 {0x1928103C, "Smart Array P230i", &SA5_access},
204 {0x1929103C, "Smart Array P530", &SA5_access},
205 {0x21BD103C, "Smart Array P244br", &SA5_access},
206 {0x21BE103C, "Smart Array P741m", &SA5_access},
207 {0x21BF103C, "Smart HBA H240ar", &SA5_access},
208 {0x21C0103C, "Smart Array P440ar", &SA5_access},
209 {0x21C1103C, "Smart Array P840ar", &SA5_access},
210 {0x21C2103C, "Smart Array P440", &SA5_access},
211 {0x21C3103C, "Smart Array P441", &SA5_access},
212 {0x21C4103C, "Smart Array", &SA5_access},
213 {0x21C5103C, "Smart Array P841", &SA5_access},
214 {0x21C6103C, "Smart HBA H244br", &SA5_access},
215 {0x21C7103C, "Smart HBA H240", &SA5_access},
216 {0x21C8103C, "Smart HBA H241", &SA5_access},
217 {0x21C9103C, "Smart Array", &SA5_access},
218 {0x21CA103C, "Smart Array P246br", &SA5_access},
219 {0x21CB103C, "Smart Array P840", &SA5_access},
220 {0x21CC103C, "Smart Array", &SA5_access},
221 {0x21CD103C, "Smart Array", &SA5_access},
222 {0x21CE103C, "Smart HBA", &SA5_access},
223 {0x05809005, "SmartHBA-SA", &SA5_access},
224 {0x05819005, "SmartHBA-SA 8i", &SA5_access},
225 {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
226 {0x05839005, "SmartHBA-SA 8e", &SA5_access},
227 {0x05849005, "SmartHBA-SA 16i", &SA5_access},
228 {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
229 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
230 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
231 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
232 {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
233 {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
234 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
235 };
236
237 static struct scsi_transport_template *hpsa_sas_transport_template;
238 static int hpsa_add_sas_host(struct ctlr_info *h);
239 static void hpsa_delete_sas_host(struct ctlr_info *h);
240 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
241 struct hpsa_scsi_dev_t *device);
242 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
243 static struct hpsa_scsi_dev_t
244 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
245 struct sas_rphy *rphy);
246
247 #define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
248 static const struct scsi_cmnd hpsa_cmd_busy;
249 #define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
250 static const struct scsi_cmnd hpsa_cmd_idle;
251 static int number_of_controllers;
252
253 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
254 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
255 static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
256 void __user *arg);
257
258 #ifdef CONFIG_COMPAT
259 static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
260 void __user *arg);
261 #endif
262
263 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
264 static struct CommandList *cmd_alloc(struct ctlr_info *h);
265 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
266 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
267 struct scsi_cmnd *scmd);
268 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
269 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
270 int cmd_type);
271 static void hpsa_free_cmd_pool(struct ctlr_info *h);
272 #define VPD_PAGE (1 << 8)
273 #define HPSA_SIMPLE_ERROR_BITS 0x03
274
275 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
276 static void hpsa_scan_start(struct Scsi_Host *);
277 static int hpsa_scan_finished(struct Scsi_Host *sh,
278 unsigned long elapsed_time);
279 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
280
281 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
282 static int hpsa_slave_alloc(struct scsi_device *sdev);
283 static int hpsa_slave_configure(struct scsi_device *sdev);
284 static void hpsa_slave_destroy(struct scsi_device *sdev);
285
286 static void hpsa_update_scsi_devices(struct ctlr_info *h);
287 static int check_for_unit_attention(struct ctlr_info *h,
288 struct CommandList *c);
289 static void check_ioctl_unit_attention(struct ctlr_info *h,
290 struct CommandList *c);
291
292 static void calc_bucket_map(int *bucket, int num_buckets,
293 int nsgs, int min_blocks, u32 *bucket_map);
294 static void hpsa_free_performant_mode(struct ctlr_info *h);
295 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
296 static inline u32 next_command(struct ctlr_info *h, u8 q);
297 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
298 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
299 u64 *cfg_offset);
300 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
301 unsigned long *memory_bar);
302 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
303 bool *legacy_board);
304 static int wait_for_device_to_become_ready(struct ctlr_info *h,
305 unsigned char lunaddr[],
306 int reply_queue);
307 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
308 int wait_for_ready);
309 static inline void finish_cmd(struct CommandList *c);
310 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
311 #define BOARD_NOT_READY 0
312 #define BOARD_READY 1
313 static void hpsa_drain_accel_commands(struct ctlr_info *h);
314 static void hpsa_flush_cache(struct ctlr_info *h);
315 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
316 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
317 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
318 static void hpsa_command_resubmit_worker(struct work_struct *work);
319 static u32 lockup_detected(struct ctlr_info *h);
320 static int detect_controller_lockup(struct ctlr_info *h);
321 static void hpsa_disable_rld_caching(struct ctlr_info *h);
322 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
323 struct ReportExtendedLUNdata *buf, int bufsize);
324 static bool hpsa_vpd_page_supported(struct ctlr_info *h,
325 unsigned char scsi3addr[], u8 page);
326 static int hpsa_luns_changed(struct ctlr_info *h);
327 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
328 struct hpsa_scsi_dev_t *dev,
329 unsigned char *scsi3addr);
330
331 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
332 {
333 unsigned long *priv = shost_priv(sdev->host);
334 return (struct ctlr_info *) *priv;
335 }
336
337 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
338 {
339 unsigned long *priv = shost_priv(sh);
340 return (struct ctlr_info *) *priv;
341 }
342
343 static inline bool hpsa_is_cmd_idle(struct CommandList *c)
344 {
345 return c->scsi_cmd == SCSI_CMD_IDLE;
346 }
347
348
349 static void decode_sense_data(const u8 *sense_data, int sense_data_len,
350 u8 *sense_key, u8 *asc, u8 *ascq)
351 {
352 struct scsi_sense_hdr sshdr;
353 bool rc;
354
355 *sense_key = -1;
356 *asc = -1;
357 *ascq = -1;
358
359 if (sense_data_len < 1)
360 return;
361
362 rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
363 if (rc) {
364 *sense_key = sshdr.sense_key;
365 *asc = sshdr.asc;
366 *ascq = sshdr.ascq;
367 }
368 }
369
370 static int check_for_unit_attention(struct ctlr_info *h,
371 struct CommandList *c)
372 {
373 u8 sense_key, asc, ascq;
374 int sense_len;
375
376 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
377 sense_len = sizeof(c->err_info->SenseInfo);
378 else
379 sense_len = c->err_info->SenseLen;
380
381 decode_sense_data(c->err_info->SenseInfo, sense_len,
382 &sense_key, &asc, &ascq);
383 if (sense_key != UNIT_ATTENTION || asc == 0xff)
384 return 0;
385
386 switch (asc) {
387 case STATE_CHANGED:
388 dev_warn(&h->pdev->dev,
389 "%s: a state change detected, command retried\n",
390 h->devname);
391 break;
392 case LUN_FAILED:
393 dev_warn(&h->pdev->dev,
394 "%s: LUN failure detected\n", h->devname);
395 break;
396 case REPORT_LUNS_CHANGED:
397 dev_warn(&h->pdev->dev,
398 "%s: report LUN data changed\n", h->devname);
399
400
401
402
403 break;
404 case POWER_OR_RESET:
405 dev_warn(&h->pdev->dev,
406 "%s: a power on or device reset detected\n",
407 h->devname);
408 break;
409 case UNIT_ATTENTION_CLEARED:
410 dev_warn(&h->pdev->dev,
411 "%s: unit attention cleared by another initiator\n",
412 h->devname);
413 break;
414 default:
415 dev_warn(&h->pdev->dev,
416 "%s: unknown unit attention detected\n",
417 h->devname);
418 break;
419 }
420 return 1;
421 }
422
423 static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
424 {
425 if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
426 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
427 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
428 return 0;
429 dev_warn(&h->pdev->dev, HPSA "device busy");
430 return 1;
431 }
432
433 static u32 lockup_detected(struct ctlr_info *h);
434 static ssize_t host_show_lockup_detected(struct device *dev,
435 struct device_attribute *attr, char *buf)
436 {
437 int ld;
438 struct ctlr_info *h;
439 struct Scsi_Host *shost = class_to_shost(dev);
440
441 h = shost_to_hba(shost);
442 ld = lockup_detected(h);
443
444 return sprintf(buf, "ld=%d\n", ld);
445 }
446
447 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
448 struct device_attribute *attr,
449 const char *buf, size_t count)
450 {
451 int status, len;
452 struct ctlr_info *h;
453 struct Scsi_Host *shost = class_to_shost(dev);
454 char tmpbuf[10];
455
456 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
457 return -EACCES;
458 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
459 strncpy(tmpbuf, buf, len);
460 tmpbuf[len] = '\0';
461 if (sscanf(tmpbuf, "%d", &status) != 1)
462 return -EINVAL;
463 h = shost_to_hba(shost);
464 h->acciopath_status = !!status;
465 dev_warn(&h->pdev->dev,
466 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
467 h->acciopath_status ? "enabled" : "disabled");
468 return count;
469 }
470
471 static ssize_t host_store_raid_offload_debug(struct device *dev,
472 struct device_attribute *attr,
473 const char *buf, size_t count)
474 {
475 int debug_level, len;
476 struct ctlr_info *h;
477 struct Scsi_Host *shost = class_to_shost(dev);
478 char tmpbuf[10];
479
480 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
481 return -EACCES;
482 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
483 strncpy(tmpbuf, buf, len);
484 tmpbuf[len] = '\0';
485 if (sscanf(tmpbuf, "%d", &debug_level) != 1)
486 return -EINVAL;
487 if (debug_level < 0)
488 debug_level = 0;
489 h = shost_to_hba(shost);
490 h->raid_offload_debug = debug_level;
491 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
492 h->raid_offload_debug);
493 return count;
494 }
495
496 static ssize_t host_store_rescan(struct device *dev,
497 struct device_attribute *attr,
498 const char *buf, size_t count)
499 {
500 struct ctlr_info *h;
501 struct Scsi_Host *shost = class_to_shost(dev);
502 h = shost_to_hba(shost);
503 hpsa_scan_start(h->scsi_host);
504 return count;
505 }
506
507 static ssize_t host_show_firmware_revision(struct device *dev,
508 struct device_attribute *attr, char *buf)
509 {
510 struct ctlr_info *h;
511 struct Scsi_Host *shost = class_to_shost(dev);
512 unsigned char *fwrev;
513
514 h = shost_to_hba(shost);
515 if (!h->hba_inquiry_data)
516 return 0;
517 fwrev = &h->hba_inquiry_data[32];
518 return snprintf(buf, 20, "%c%c%c%c\n",
519 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
520 }
521
522 static ssize_t host_show_commands_outstanding(struct device *dev,
523 struct device_attribute *attr, char *buf)
524 {
525 struct Scsi_Host *shost = class_to_shost(dev);
526 struct ctlr_info *h = shost_to_hba(shost);
527
528 return snprintf(buf, 20, "%d\n",
529 atomic_read(&h->commands_outstanding));
530 }
531
532 static ssize_t host_show_transport_mode(struct device *dev,
533 struct device_attribute *attr, char *buf)
534 {
535 struct ctlr_info *h;
536 struct Scsi_Host *shost = class_to_shost(dev);
537
538 h = shost_to_hba(shost);
539 return snprintf(buf, 20, "%s\n",
540 h->transMethod & CFGTBL_Trans_Performant ?
541 "performant" : "simple");
542 }
543
544 static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
545 struct device_attribute *attr, char *buf)
546 {
547 struct ctlr_info *h;
548 struct Scsi_Host *shost = class_to_shost(dev);
549
550 h = shost_to_hba(shost);
551 return snprintf(buf, 30, "HP SSD Smart Path %s\n",
552 (h->acciopath_status == 1) ? "enabled" : "disabled");
553 }
554
555
556 static u32 unresettable_controller[] = {
557 0x324a103C,
558 0x324b103C,
559 0x3223103C,
560 0x3234103C,
561 0x3235103C,
562 0x3211103C,
563 0x3212103C,
564 0x3213103C,
565 0x3214103C,
566 0x3215103C,
567 0x3237103C,
568 0x323D103C,
569 0x40800E11,
570 0x409C0E11,
571 0x409D0E11,
572 0x40700E11,
573 0x40820E11,
574 0x40830E11,
575 0x409A0E11,
576 0x409B0E11,
577 0x40910E11,
578 };
579
580
581 static u32 soft_unresettable_controller[] = {
582 0x40800E11,
583 0x40700E11,
584 0x40820E11,
585 0x40830E11,
586 0x409A0E11,
587 0x409B0E11,
588 0x40910E11,
589
590
591
592
593
594
595
596 0x409C0E11,
597 0x409D0E11,
598 };
599
600 static int board_id_in_array(u32 a[], int nelems, u32 board_id)
601 {
602 int i;
603
604 for (i = 0; i < nelems; i++)
605 if (a[i] == board_id)
606 return 1;
607 return 0;
608 }
609
610 static int ctlr_is_hard_resettable(u32 board_id)
611 {
612 return !board_id_in_array(unresettable_controller,
613 ARRAY_SIZE(unresettable_controller), board_id);
614 }
615
616 static int ctlr_is_soft_resettable(u32 board_id)
617 {
618 return !board_id_in_array(soft_unresettable_controller,
619 ARRAY_SIZE(soft_unresettable_controller), board_id);
620 }
621
622 static int ctlr_is_resettable(u32 board_id)
623 {
624 return ctlr_is_hard_resettable(board_id) ||
625 ctlr_is_soft_resettable(board_id);
626 }
627
628 static ssize_t host_show_resettable(struct device *dev,
629 struct device_attribute *attr, char *buf)
630 {
631 struct ctlr_info *h;
632 struct Scsi_Host *shost = class_to_shost(dev);
633
634 h = shost_to_hba(shost);
635 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
636 }
637
638 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
639 {
640 return (scsi3addr[3] & 0xC0) == 0x40;
641 }
642
643 static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
644 "1(+0)ADM", "UNKNOWN", "PHYS DRV"
645 };
646 #define HPSA_RAID_0 0
647 #define HPSA_RAID_4 1
648 #define HPSA_RAID_1 2
649 #define HPSA_RAID_5 3
650 #define HPSA_RAID_51 4
651 #define HPSA_RAID_6 5
652 #define HPSA_RAID_ADM 6
653 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
654 #define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
655
656 static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
657 {
658 return !device->physical_device;
659 }
660
661 static ssize_t raid_level_show(struct device *dev,
662 struct device_attribute *attr, char *buf)
663 {
664 ssize_t l = 0;
665 unsigned char rlevel;
666 struct ctlr_info *h;
667 struct scsi_device *sdev;
668 struct hpsa_scsi_dev_t *hdev;
669 unsigned long flags;
670
671 sdev = to_scsi_device(dev);
672 h = sdev_to_hba(sdev);
673 spin_lock_irqsave(&h->lock, flags);
674 hdev = sdev->hostdata;
675 if (!hdev) {
676 spin_unlock_irqrestore(&h->lock, flags);
677 return -ENODEV;
678 }
679
680
681 if (!is_logical_device(hdev)) {
682 spin_unlock_irqrestore(&h->lock, flags);
683 l = snprintf(buf, PAGE_SIZE, "N/A\n");
684 return l;
685 }
686
687 rlevel = hdev->raid_level;
688 spin_unlock_irqrestore(&h->lock, flags);
689 if (rlevel > RAID_UNKNOWN)
690 rlevel = RAID_UNKNOWN;
691 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
692 return l;
693 }
694
695 static ssize_t lunid_show(struct device *dev,
696 struct device_attribute *attr, char *buf)
697 {
698 struct ctlr_info *h;
699 struct scsi_device *sdev;
700 struct hpsa_scsi_dev_t *hdev;
701 unsigned long flags;
702 unsigned char lunid[8];
703
704 sdev = to_scsi_device(dev);
705 h = sdev_to_hba(sdev);
706 spin_lock_irqsave(&h->lock, flags);
707 hdev = sdev->hostdata;
708 if (!hdev) {
709 spin_unlock_irqrestore(&h->lock, flags);
710 return -ENODEV;
711 }
712 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
713 spin_unlock_irqrestore(&h->lock, flags);
714 return snprintf(buf, 20, "0x%8phN\n", lunid);
715 }
716
717 static ssize_t unique_id_show(struct device *dev,
718 struct device_attribute *attr, char *buf)
719 {
720 struct ctlr_info *h;
721 struct scsi_device *sdev;
722 struct hpsa_scsi_dev_t *hdev;
723 unsigned long flags;
724 unsigned char sn[16];
725
726 sdev = to_scsi_device(dev);
727 h = sdev_to_hba(sdev);
728 spin_lock_irqsave(&h->lock, flags);
729 hdev = sdev->hostdata;
730 if (!hdev) {
731 spin_unlock_irqrestore(&h->lock, flags);
732 return -ENODEV;
733 }
734 memcpy(sn, hdev->device_id, sizeof(sn));
735 spin_unlock_irqrestore(&h->lock, flags);
736 return snprintf(buf, 16 * 2 + 2,
737 "%02X%02X%02X%02X%02X%02X%02X%02X"
738 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
739 sn[0], sn[1], sn[2], sn[3],
740 sn[4], sn[5], sn[6], sn[7],
741 sn[8], sn[9], sn[10], sn[11],
742 sn[12], sn[13], sn[14], sn[15]);
743 }
744
745 static ssize_t sas_address_show(struct device *dev,
746 struct device_attribute *attr, char *buf)
747 {
748 struct ctlr_info *h;
749 struct scsi_device *sdev;
750 struct hpsa_scsi_dev_t *hdev;
751 unsigned long flags;
752 u64 sas_address;
753
754 sdev = to_scsi_device(dev);
755 h = sdev_to_hba(sdev);
756 spin_lock_irqsave(&h->lock, flags);
757 hdev = sdev->hostdata;
758 if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
759 spin_unlock_irqrestore(&h->lock, flags);
760 return -ENODEV;
761 }
762 sas_address = hdev->sas_address;
763 spin_unlock_irqrestore(&h->lock, flags);
764
765 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
766 }
767
768 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
769 struct device_attribute *attr, char *buf)
770 {
771 struct ctlr_info *h;
772 struct scsi_device *sdev;
773 struct hpsa_scsi_dev_t *hdev;
774 unsigned long flags;
775 int offload_enabled;
776
777 sdev = to_scsi_device(dev);
778 h = sdev_to_hba(sdev);
779 spin_lock_irqsave(&h->lock, flags);
780 hdev = sdev->hostdata;
781 if (!hdev) {
782 spin_unlock_irqrestore(&h->lock, flags);
783 return -ENODEV;
784 }
785 offload_enabled = hdev->offload_enabled;
786 spin_unlock_irqrestore(&h->lock, flags);
787
788 if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
789 return snprintf(buf, 20, "%d\n", offload_enabled);
790 else
791 return snprintf(buf, 40, "%s\n",
792 "Not applicable for a controller");
793 }
794
795 #define MAX_PATHS 8
796 static ssize_t path_info_show(struct device *dev,
797 struct device_attribute *attr, char *buf)
798 {
799 struct ctlr_info *h;
800 struct scsi_device *sdev;
801 struct hpsa_scsi_dev_t *hdev;
802 unsigned long flags;
803 int i;
804 int output_len = 0;
805 u8 box;
806 u8 bay;
807 u8 path_map_index = 0;
808 char *active;
809 unsigned char phys_connector[2];
810
811 sdev = to_scsi_device(dev);
812 h = sdev_to_hba(sdev);
813 spin_lock_irqsave(&h->devlock, flags);
814 hdev = sdev->hostdata;
815 if (!hdev) {
816 spin_unlock_irqrestore(&h->devlock, flags);
817 return -ENODEV;
818 }
819
820 bay = hdev->bay;
821 for (i = 0; i < MAX_PATHS; i++) {
822 path_map_index = 1<<i;
823 if (i == hdev->active_path_index)
824 active = "Active";
825 else if (hdev->path_map & path_map_index)
826 active = "Inactive";
827 else
828 continue;
829
830 output_len += scnprintf(buf + output_len,
831 PAGE_SIZE - output_len,
832 "[%d:%d:%d:%d] %20.20s ",
833 h->scsi_host->host_no,
834 hdev->bus, hdev->target, hdev->lun,
835 scsi_device_type(hdev->devtype));
836
837 if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
838 output_len += scnprintf(buf + output_len,
839 PAGE_SIZE - output_len,
840 "%s\n", active);
841 continue;
842 }
843
844 box = hdev->box[i];
845 memcpy(&phys_connector, &hdev->phys_connector[i],
846 sizeof(phys_connector));
847 if (phys_connector[0] < '0')
848 phys_connector[0] = '0';
849 if (phys_connector[1] < '0')
850 phys_connector[1] = '0';
851 output_len += scnprintf(buf + output_len,
852 PAGE_SIZE - output_len,
853 "PORT: %.2s ",
854 phys_connector);
855 if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
856 hdev->expose_device) {
857 if (box == 0 || box == 0xFF) {
858 output_len += scnprintf(buf + output_len,
859 PAGE_SIZE - output_len,
860 "BAY: %hhu %s\n",
861 bay, active);
862 } else {
863 output_len += scnprintf(buf + output_len,
864 PAGE_SIZE - output_len,
865 "BOX: %hhu BAY: %hhu %s\n",
866 box, bay, active);
867 }
868 } else if (box != 0 && box != 0xFF) {
869 output_len += scnprintf(buf + output_len,
870 PAGE_SIZE - output_len, "BOX: %hhu %s\n",
871 box, active);
872 } else
873 output_len += scnprintf(buf + output_len,
874 PAGE_SIZE - output_len, "%s\n", active);
875 }
876
877 spin_unlock_irqrestore(&h->devlock, flags);
878 return output_len;
879 }
880
881 static ssize_t host_show_ctlr_num(struct device *dev,
882 struct device_attribute *attr, char *buf)
883 {
884 struct ctlr_info *h;
885 struct Scsi_Host *shost = class_to_shost(dev);
886
887 h = shost_to_hba(shost);
888 return snprintf(buf, 20, "%d\n", h->ctlr);
889 }
890
891 static ssize_t host_show_legacy_board(struct device *dev,
892 struct device_attribute *attr, char *buf)
893 {
894 struct ctlr_info *h;
895 struct Scsi_Host *shost = class_to_shost(dev);
896
897 h = shost_to_hba(shost);
898 return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
899 }
900
901 static DEVICE_ATTR_RO(raid_level);
902 static DEVICE_ATTR_RO(lunid);
903 static DEVICE_ATTR_RO(unique_id);
904 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
905 static DEVICE_ATTR_RO(sas_address);
906 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
907 host_show_hp_ssd_smart_path_enabled, NULL);
908 static DEVICE_ATTR_RO(path_info);
909 static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
910 host_show_hp_ssd_smart_path_status,
911 host_store_hp_ssd_smart_path_status);
912 static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
913 host_store_raid_offload_debug);
914 static DEVICE_ATTR(firmware_revision, S_IRUGO,
915 host_show_firmware_revision, NULL);
916 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
917 host_show_commands_outstanding, NULL);
918 static DEVICE_ATTR(transport_mode, S_IRUGO,
919 host_show_transport_mode, NULL);
920 static DEVICE_ATTR(resettable, S_IRUGO,
921 host_show_resettable, NULL);
922 static DEVICE_ATTR(lockup_detected, S_IRUGO,
923 host_show_lockup_detected, NULL);
924 static DEVICE_ATTR(ctlr_num, S_IRUGO,
925 host_show_ctlr_num, NULL);
926 static DEVICE_ATTR(legacy_board, S_IRUGO,
927 host_show_legacy_board, NULL);
928
929 static struct device_attribute *hpsa_sdev_attrs[] = {
930 &dev_attr_raid_level,
931 &dev_attr_lunid,
932 &dev_attr_unique_id,
933 &dev_attr_hp_ssd_smart_path_enabled,
934 &dev_attr_path_info,
935 &dev_attr_sas_address,
936 NULL,
937 };
938
939 static struct device_attribute *hpsa_shost_attrs[] = {
940 &dev_attr_rescan,
941 &dev_attr_firmware_revision,
942 &dev_attr_commands_outstanding,
943 &dev_attr_transport_mode,
944 &dev_attr_resettable,
945 &dev_attr_hp_ssd_smart_path_status,
946 &dev_attr_raid_offload_debug,
947 &dev_attr_lockup_detected,
948 &dev_attr_ctlr_num,
949 &dev_attr_legacy_board,
950 NULL,
951 };
952
953 #define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
954 HPSA_MAX_CONCURRENT_PASSTHRUS)
955
956 static struct scsi_host_template hpsa_driver_template = {
957 .module = THIS_MODULE,
958 .name = HPSA,
959 .proc_name = HPSA,
960 .queuecommand = hpsa_scsi_queue_command,
961 .scan_start = hpsa_scan_start,
962 .scan_finished = hpsa_scan_finished,
963 .change_queue_depth = hpsa_change_queue_depth,
964 .this_id = -1,
965 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
966 .ioctl = hpsa_ioctl,
967 .slave_alloc = hpsa_slave_alloc,
968 .slave_configure = hpsa_slave_configure,
969 .slave_destroy = hpsa_slave_destroy,
970 #ifdef CONFIG_COMPAT
971 .compat_ioctl = hpsa_compat_ioctl,
972 #endif
973 .sdev_attrs = hpsa_sdev_attrs,
974 .shost_attrs = hpsa_shost_attrs,
975 .max_sectors = 2048,
976 .no_write_same = 1,
977 };
978
979 static inline u32 next_command(struct ctlr_info *h, u8 q)
980 {
981 u32 a;
982 struct reply_queue_buffer *rq = &h->reply_queue[q];
983
984 if (h->transMethod & CFGTBL_Trans_io_accel1)
985 return h->access.command_completed(h, q);
986
987 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
988 return h->access.command_completed(h, q);
989
990 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
991 a = rq->head[rq->current_entry];
992 rq->current_entry++;
993 atomic_dec(&h->commands_outstanding);
994 } else {
995 a = FIFO_EMPTY;
996 }
997
998 if (rq->current_entry == h->max_commands) {
999 rq->current_entry = 0;
1000 rq->wraparound ^= 1;
1001 }
1002 return a;
1003 }
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036 #define DEFAULT_REPLY_QUEUE (-1)
1037 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
1038 int reply_queue)
1039 {
1040 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
1041 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
1042 if (unlikely(!h->msix_vectors))
1043 return;
1044 c->Header.ReplyQueue = reply_queue;
1045 }
1046 }
1047
1048 static void set_ioaccel1_performant_mode(struct ctlr_info *h,
1049 struct CommandList *c,
1050 int reply_queue)
1051 {
1052 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
1053
1054
1055
1056
1057
1058 cp->ReplyQueue = reply_queue;
1059
1060
1061
1062
1063
1064
1065 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
1066 IOACCEL1_BUSADDR_CMDTYPE;
1067 }
1068
1069 static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
1070 struct CommandList *c,
1071 int reply_queue)
1072 {
1073 struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1074 &h->ioaccel2_cmd_pool[c->cmdindex];
1075
1076
1077
1078
1079 cp->reply_queue = reply_queue;
1080
1081
1082
1083
1084
1085 c->busaddr |= h->ioaccel2_blockFetchTable[0];
1086 }
1087
1088 static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1089 struct CommandList *c,
1090 int reply_queue)
1091 {
1092 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1093
1094
1095
1096
1097
1098 cp->reply_queue = reply_queue;
1099
1100
1101
1102
1103
1104
1105 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1106 }
1107
1108 static int is_firmware_flash_cmd(u8 *cdb)
1109 {
1110 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1111 }
1112
1113
1114
1115
1116
1117
1118 #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1119 #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1120 #define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
1121 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1122 struct CommandList *c)
1123 {
1124 if (!is_firmware_flash_cmd(c->Request.CDB))
1125 return;
1126 atomic_inc(&h->firmware_flash_in_progress);
1127 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1128 }
1129
1130 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1131 struct CommandList *c)
1132 {
1133 if (is_firmware_flash_cmd(c->Request.CDB) &&
1134 atomic_dec_and_test(&h->firmware_flash_in_progress))
1135 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1136 }
1137
1138 static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1139 struct CommandList *c, int reply_queue)
1140 {
1141 dial_down_lockup_detection_during_fw_flash(h, c);
1142 atomic_inc(&h->commands_outstanding);
1143 if (c->device)
1144 atomic_inc(&c->device->commands_outstanding);
1145
1146 reply_queue = h->reply_map[raw_smp_processor_id()];
1147 switch (c->cmd_type) {
1148 case CMD_IOACCEL1:
1149 set_ioaccel1_performant_mode(h, c, reply_queue);
1150 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1151 break;
1152 case CMD_IOACCEL2:
1153 set_ioaccel2_performant_mode(h, c, reply_queue);
1154 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1155 break;
1156 case IOACCEL2_TMF:
1157 set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1158 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1159 break;
1160 default:
1161 set_performant_mode(h, c, reply_queue);
1162 h->access.submit_command(h, c);
1163 }
1164 }
1165
1166 static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1167 {
1168 __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1169 }
1170
1171 static inline int is_hba_lunid(unsigned char scsi3addr[])
1172 {
1173 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1174 }
1175
1176 static inline int is_scsi_rev_5(struct ctlr_info *h)
1177 {
1178 if (!h->hba_inquiry_data)
1179 return 0;
1180 if ((h->hba_inquiry_data[2] & 0x07) == 5)
1181 return 1;
1182 return 0;
1183 }
1184
1185 static int hpsa_find_target_lun(struct ctlr_info *h,
1186 unsigned char scsi3addr[], int bus, int *target, int *lun)
1187 {
1188
1189
1190
1191 int i, found = 0;
1192 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1193
1194 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1195
1196 for (i = 0; i < h->ndevices; i++) {
1197 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1198 __set_bit(h->dev[i]->target, lun_taken);
1199 }
1200
1201 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1202 if (i < HPSA_MAX_DEVICES) {
1203
1204 *target = i;
1205 *lun = 0;
1206 found = 1;
1207 }
1208 return !found;
1209 }
1210
1211 static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1212 struct hpsa_scsi_dev_t *dev, char *description)
1213 {
1214 #define LABEL_SIZE 25
1215 char label[LABEL_SIZE];
1216
1217 if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1218 return;
1219
1220 switch (dev->devtype) {
1221 case TYPE_RAID:
1222 snprintf(label, LABEL_SIZE, "controller");
1223 break;
1224 case TYPE_ENCLOSURE:
1225 snprintf(label, LABEL_SIZE, "enclosure");
1226 break;
1227 case TYPE_DISK:
1228 case TYPE_ZBC:
1229 if (dev->external)
1230 snprintf(label, LABEL_SIZE, "external");
1231 else if (!is_logical_dev_addr_mode(dev->scsi3addr))
1232 snprintf(label, LABEL_SIZE, "%s",
1233 raid_label[PHYSICAL_DRIVE]);
1234 else
1235 snprintf(label, LABEL_SIZE, "RAID-%s",
1236 dev->raid_level > RAID_UNKNOWN ? "?" :
1237 raid_label[dev->raid_level]);
1238 break;
1239 case TYPE_ROM:
1240 snprintf(label, LABEL_SIZE, "rom");
1241 break;
1242 case TYPE_TAPE:
1243 snprintf(label, LABEL_SIZE, "tape");
1244 break;
1245 case TYPE_MEDIUM_CHANGER:
1246 snprintf(label, LABEL_SIZE, "changer");
1247 break;
1248 default:
1249 snprintf(label, LABEL_SIZE, "UNKNOWN");
1250 break;
1251 }
1252
1253 dev_printk(level, &h->pdev->dev,
1254 "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1255 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1256 description,
1257 scsi_device_type(dev->devtype),
1258 dev->vendor,
1259 dev->model,
1260 label,
1261 dev->offload_config ? '+' : '-',
1262 dev->offload_to_be_enabled ? '+' : '-',
1263 dev->expose_device);
1264 }
1265
1266
1267 static int hpsa_scsi_add_entry(struct ctlr_info *h,
1268 struct hpsa_scsi_dev_t *device,
1269 struct hpsa_scsi_dev_t *added[], int *nadded)
1270 {
1271
1272 int n = h->ndevices;
1273 int i;
1274 unsigned char addr1[8], addr2[8];
1275 struct hpsa_scsi_dev_t *sd;
1276
1277 if (n >= HPSA_MAX_DEVICES) {
1278 dev_err(&h->pdev->dev, "too many devices, some will be "
1279 "inaccessible.\n");
1280 return -1;
1281 }
1282
1283
1284 if (device->lun != -1)
1285
1286 goto lun_assigned;
1287
1288
1289
1290
1291
1292 if (device->scsi3addr[4] == 0) {
1293
1294 if (hpsa_find_target_lun(h, device->scsi3addr,
1295 device->bus, &device->target, &device->lun) != 0)
1296 return -1;
1297 goto lun_assigned;
1298 }
1299
1300
1301
1302
1303
1304
1305
1306 memcpy(addr1, device->scsi3addr, 8);
1307 addr1[4] = 0;
1308 addr1[5] = 0;
1309 for (i = 0; i < n; i++) {
1310 sd = h->dev[i];
1311 memcpy(addr2, sd->scsi3addr, 8);
1312 addr2[4] = 0;
1313 addr2[5] = 0;
1314
1315 if (memcmp(addr1, addr2, 8) == 0) {
1316 device->bus = sd->bus;
1317 device->target = sd->target;
1318 device->lun = device->scsi3addr[4];
1319 break;
1320 }
1321 }
1322 if (device->lun == -1) {
1323 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1324 " suspect firmware bug or unsupported hardware "
1325 "configuration.\n");
1326 return -1;
1327 }
1328
1329 lun_assigned:
1330
1331 h->dev[n] = device;
1332 h->ndevices++;
1333 added[*nadded] = device;
1334 (*nadded)++;
1335 hpsa_show_dev_msg(KERN_INFO, h, device,
1336 device->expose_device ? "added" : "masked");
1337 return 0;
1338 }
1339
1340
1341
1342
1343
1344
1345 static void hpsa_scsi_update_entry(struct ctlr_info *h,
1346 int entry, struct hpsa_scsi_dev_t *new_entry)
1347 {
1348
1349 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1350
1351
1352 h->dev[entry]->raid_level = new_entry->raid_level;
1353
1354
1355
1356
1357 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1358
1359
1360 if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
1361
1362
1363
1364
1365
1366
1367
1368
1369 h->dev[entry]->raid_map = new_entry->raid_map;
1370 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1371 }
1372 if (new_entry->offload_to_be_enabled) {
1373 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1374 wmb();
1375 }
1376 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1377 h->dev[entry]->offload_config = new_entry->offload_config;
1378 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1379 h->dev[entry]->queue_depth = new_entry->queue_depth;
1380
1381
1382
1383
1384
1385
1386 h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
1387
1388
1389
1390
1391 if (!new_entry->offload_to_be_enabled)
1392 h->dev[entry]->offload_enabled = 0;
1393
1394 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1395 }
1396
1397
1398 static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1399 int entry, struct hpsa_scsi_dev_t *new_entry,
1400 struct hpsa_scsi_dev_t *added[], int *nadded,
1401 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1402 {
1403
1404 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1405 removed[*nremoved] = h->dev[entry];
1406 (*nremoved)++;
1407
1408
1409
1410
1411
1412 if (new_entry->target == -1) {
1413 new_entry->target = h->dev[entry]->target;
1414 new_entry->lun = h->dev[entry]->lun;
1415 }
1416
1417 h->dev[entry] = new_entry;
1418 added[*nadded] = new_entry;
1419 (*nadded)++;
1420
1421 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1422 }
1423
1424
1425 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1426 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1427 {
1428
1429 int i;
1430 struct hpsa_scsi_dev_t *sd;
1431
1432 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1433
1434 sd = h->dev[entry];
1435 removed[*nremoved] = h->dev[entry];
1436 (*nremoved)++;
1437
1438 for (i = entry; i < h->ndevices-1; i++)
1439 h->dev[i] = h->dev[i+1];
1440 h->ndevices--;
1441 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1442 }
1443
1444 #define SCSI3ADDR_EQ(a, b) ( \
1445 (a)[7] == (b)[7] && \
1446 (a)[6] == (b)[6] && \
1447 (a)[5] == (b)[5] && \
1448 (a)[4] == (b)[4] && \
1449 (a)[3] == (b)[3] && \
1450 (a)[2] == (b)[2] && \
1451 (a)[1] == (b)[1] && \
1452 (a)[0] == (b)[0])
1453
1454 static void fixup_botched_add(struct ctlr_info *h,
1455 struct hpsa_scsi_dev_t *added)
1456 {
1457
1458
1459
1460 unsigned long flags;
1461 int i, j;
1462
1463 spin_lock_irqsave(&h->lock, flags);
1464 for (i = 0; i < h->ndevices; i++) {
1465 if (h->dev[i] == added) {
1466 for (j = i; j < h->ndevices-1; j++)
1467 h->dev[j] = h->dev[j+1];
1468 h->ndevices--;
1469 break;
1470 }
1471 }
1472 spin_unlock_irqrestore(&h->lock, flags);
1473 kfree(added);
1474 }
1475
1476 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1477 struct hpsa_scsi_dev_t *dev2)
1478 {
1479
1480
1481
1482
1483 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1484 sizeof(dev1->scsi3addr)) != 0)
1485 return 0;
1486 if (memcmp(dev1->device_id, dev2->device_id,
1487 sizeof(dev1->device_id)) != 0)
1488 return 0;
1489 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1490 return 0;
1491 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1492 return 0;
1493 if (dev1->devtype != dev2->devtype)
1494 return 0;
1495 if (dev1->bus != dev2->bus)
1496 return 0;
1497 return 1;
1498 }
1499
1500 static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1501 struct hpsa_scsi_dev_t *dev2)
1502 {
1503
1504
1505
1506
1507 if (dev1->raid_level != dev2->raid_level)
1508 return 1;
1509 if (dev1->offload_config != dev2->offload_config)
1510 return 1;
1511 if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
1512 return 1;
1513 if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1514 if (dev1->queue_depth != dev2->queue_depth)
1515 return 1;
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525 if (dev1->ioaccel_handle != dev2->ioaccel_handle)
1526 return 1;
1527 return 0;
1528 }
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1539 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1540 int *index)
1541 {
1542 int i;
1543 #define DEVICE_NOT_FOUND 0
1544 #define DEVICE_CHANGED 1
1545 #define DEVICE_SAME 2
1546 #define DEVICE_UPDATED 3
1547 if (needle == NULL)
1548 return DEVICE_NOT_FOUND;
1549
1550 for (i = 0; i < haystack_size; i++) {
1551 if (haystack[i] == NULL)
1552 continue;
1553 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1554 *index = i;
1555 if (device_is_the_same(needle, haystack[i])) {
1556 if (device_updated(needle, haystack[i]))
1557 return DEVICE_UPDATED;
1558 return DEVICE_SAME;
1559 } else {
1560
1561 if (needle->volume_offline)
1562 return DEVICE_NOT_FOUND;
1563 return DEVICE_CHANGED;
1564 }
1565 }
1566 }
1567 *index = -1;
1568 return DEVICE_NOT_FOUND;
1569 }
1570
1571 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1572 unsigned char scsi3addr[])
1573 {
1574 struct offline_device_entry *device;
1575 unsigned long flags;
1576
1577
1578 spin_lock_irqsave(&h->offline_device_lock, flags);
1579 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1580 if (memcmp(device->scsi3addr, scsi3addr,
1581 sizeof(device->scsi3addr)) == 0) {
1582 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1583 return;
1584 }
1585 }
1586 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1587
1588
1589 device = kmalloc(sizeof(*device), GFP_KERNEL);
1590 if (!device)
1591 return;
1592
1593 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1594 spin_lock_irqsave(&h->offline_device_lock, flags);
1595 list_add_tail(&device->offline_list, &h->offline_device_list);
1596 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1597 }
1598
1599
1600 static void hpsa_show_volume_status(struct ctlr_info *h,
1601 struct hpsa_scsi_dev_t *sd)
1602 {
1603 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1604 dev_info(&h->pdev->dev,
1605 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1606 h->scsi_host->host_no,
1607 sd->bus, sd->target, sd->lun);
1608 switch (sd->volume_offline) {
1609 case HPSA_LV_OK:
1610 break;
1611 case HPSA_LV_UNDERGOING_ERASE:
1612 dev_info(&h->pdev->dev,
1613 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1614 h->scsi_host->host_no,
1615 sd->bus, sd->target, sd->lun);
1616 break;
1617 case HPSA_LV_NOT_AVAILABLE:
1618 dev_info(&h->pdev->dev,
1619 "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1620 h->scsi_host->host_no,
1621 sd->bus, sd->target, sd->lun);
1622 break;
1623 case HPSA_LV_UNDERGOING_RPI:
1624 dev_info(&h->pdev->dev,
1625 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1626 h->scsi_host->host_no,
1627 sd->bus, sd->target, sd->lun);
1628 break;
1629 case HPSA_LV_PENDING_RPI:
1630 dev_info(&h->pdev->dev,
1631 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1632 h->scsi_host->host_no,
1633 sd->bus, sd->target, sd->lun);
1634 break;
1635 case HPSA_LV_ENCRYPTED_NO_KEY:
1636 dev_info(&h->pdev->dev,
1637 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1638 h->scsi_host->host_no,
1639 sd->bus, sd->target, sd->lun);
1640 break;
1641 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1642 dev_info(&h->pdev->dev,
1643 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1644 h->scsi_host->host_no,
1645 sd->bus, sd->target, sd->lun);
1646 break;
1647 case HPSA_LV_UNDERGOING_ENCRYPTION:
1648 dev_info(&h->pdev->dev,
1649 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1650 h->scsi_host->host_no,
1651 sd->bus, sd->target, sd->lun);
1652 break;
1653 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1654 dev_info(&h->pdev->dev,
1655 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1656 h->scsi_host->host_no,
1657 sd->bus, sd->target, sd->lun);
1658 break;
1659 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1660 dev_info(&h->pdev->dev,
1661 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1662 h->scsi_host->host_no,
1663 sd->bus, sd->target, sd->lun);
1664 break;
1665 case HPSA_LV_PENDING_ENCRYPTION:
1666 dev_info(&h->pdev->dev,
1667 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1668 h->scsi_host->host_no,
1669 sd->bus, sd->target, sd->lun);
1670 break;
1671 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1672 dev_info(&h->pdev->dev,
1673 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1674 h->scsi_host->host_no,
1675 sd->bus, sd->target, sd->lun);
1676 break;
1677 }
1678 }
1679
1680
1681
1682
1683
1684 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1685 struct hpsa_scsi_dev_t *dev[], int ndevices,
1686 struct hpsa_scsi_dev_t *logical_drive)
1687 {
1688 struct raid_map_data *map = &logical_drive->raid_map;
1689 struct raid_map_disk_data *dd = &map->data[0];
1690 int i, j;
1691 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1692 le16_to_cpu(map->metadata_disks_per_row);
1693 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1694 le16_to_cpu(map->layout_map_count) *
1695 total_disks_per_row;
1696 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1697 total_disks_per_row;
1698 int qdepth;
1699
1700 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1701 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1702
1703 logical_drive->nphysical_disks = nraid_map_entries;
1704
1705 qdepth = 0;
1706 for (i = 0; i < nraid_map_entries; i++) {
1707 logical_drive->phys_disk[i] = NULL;
1708 if (!logical_drive->offload_config)
1709 continue;
1710 for (j = 0; j < ndevices; j++) {
1711 if (dev[j] == NULL)
1712 continue;
1713 if (dev[j]->devtype != TYPE_DISK &&
1714 dev[j]->devtype != TYPE_ZBC)
1715 continue;
1716 if (is_logical_device(dev[j]))
1717 continue;
1718 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1719 continue;
1720
1721 logical_drive->phys_disk[i] = dev[j];
1722 if (i < nphys_disk)
1723 qdepth = min(h->nr_cmds, qdepth +
1724 logical_drive->phys_disk[i]->queue_depth);
1725 break;
1726 }
1727
1728
1729
1730
1731
1732
1733
1734
1735 if (!logical_drive->phys_disk[i]) {
1736 dev_warn(&h->pdev->dev,
1737 "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
1738 __func__,
1739 h->scsi_host->host_no, logical_drive->bus,
1740 logical_drive->target, logical_drive->lun);
1741 logical_drive->offload_enabled = 0;
1742 logical_drive->offload_to_be_enabled = 0;
1743 logical_drive->queue_depth = 8;
1744 }
1745 }
1746 if (nraid_map_entries)
1747
1748
1749
1750
1751 logical_drive->queue_depth = qdepth;
1752 else {
1753 if (logical_drive->external)
1754 logical_drive->queue_depth = EXTERNAL_QD;
1755 else
1756 logical_drive->queue_depth = h->nr_cmds;
1757 }
1758 }
1759
1760 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1761 struct hpsa_scsi_dev_t *dev[], int ndevices)
1762 {
1763 int i;
1764
1765 for (i = 0; i < ndevices; i++) {
1766 if (dev[i] == NULL)
1767 continue;
1768 if (dev[i]->devtype != TYPE_DISK &&
1769 dev[i]->devtype != TYPE_ZBC)
1770 continue;
1771 if (!is_logical_device(dev[i]))
1772 continue;
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793 if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
1794 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1795 }
1796 }
1797
1798 static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1799 {
1800 int rc = 0;
1801
1802 if (!h->scsi_host)
1803 return 1;
1804
1805 if (is_logical_device(device))
1806 rc = scsi_add_device(h->scsi_host, device->bus,
1807 device->target, device->lun);
1808 else
1809 rc = hpsa_add_sas_device(h->sas_host, device);
1810
1811 return rc;
1812 }
1813
1814 static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
1815 struct hpsa_scsi_dev_t *dev)
1816 {
1817 int i;
1818 int count = 0;
1819
1820 for (i = 0; i < h->nr_cmds; i++) {
1821 struct CommandList *c = h->cmd_pool + i;
1822 int refcount = atomic_inc_return(&c->refcount);
1823
1824 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
1825 dev->scsi3addr)) {
1826 unsigned long flags;
1827
1828 spin_lock_irqsave(&h->lock, flags);
1829 if (!hpsa_is_cmd_idle(c))
1830 ++count;
1831 spin_unlock_irqrestore(&h->lock, flags);
1832 }
1833
1834 cmd_free(h, c);
1835 }
1836
1837 return count;
1838 }
1839
1840 #define NUM_WAIT 20
1841 static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1842 struct hpsa_scsi_dev_t *device)
1843 {
1844 int cmds = 0;
1845 int waits = 0;
1846 int num_wait = NUM_WAIT;
1847
1848 if (device->external)
1849 num_wait = HPSA_EH_PTRAID_TIMEOUT;
1850
1851 while (1) {
1852 cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1853 if (cmds == 0)
1854 break;
1855 if (++waits > num_wait)
1856 break;
1857 msleep(1000);
1858 }
1859
1860 if (waits > num_wait) {
1861 dev_warn(&h->pdev->dev,
1862 "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
1863 __func__,
1864 h->scsi_host->host_no,
1865 device->bus, device->target, device->lun, cmds);
1866 }
1867 }
1868
1869 static void hpsa_remove_device(struct ctlr_info *h,
1870 struct hpsa_scsi_dev_t *device)
1871 {
1872 struct scsi_device *sdev = NULL;
1873
1874 if (!h->scsi_host)
1875 return;
1876
1877
1878
1879
1880 device->removed = 1;
1881 hpsa_wait_for_outstanding_commands_for_dev(h, device);
1882
1883 if (is_logical_device(device)) {
1884 sdev = scsi_device_lookup(h->scsi_host, device->bus,
1885 device->target, device->lun);
1886 if (sdev) {
1887 scsi_remove_device(sdev);
1888 scsi_device_put(sdev);
1889 } else {
1890
1891
1892
1893
1894
1895 hpsa_show_dev_msg(KERN_WARNING, h, device,
1896 "didn't find device for removal.");
1897 }
1898 } else {
1899
1900 hpsa_remove_sas_device(device);
1901 }
1902 }
1903
1904 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1905 struct hpsa_scsi_dev_t *sd[], int nsds)
1906 {
1907
1908
1909
1910
1911 int i, entry, device_change, changes = 0;
1912 struct hpsa_scsi_dev_t *csd;
1913 unsigned long flags;
1914 struct hpsa_scsi_dev_t **added, **removed;
1915 int nadded, nremoved;
1916
1917
1918
1919
1920
1921 spin_lock_irqsave(&h->reset_lock, flags);
1922 if (h->reset_in_progress) {
1923 h->drv_req_rescan = 1;
1924 spin_unlock_irqrestore(&h->reset_lock, flags);
1925 return;
1926 }
1927 spin_unlock_irqrestore(&h->reset_lock, flags);
1928
1929 added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
1930 removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
1931
1932 if (!added || !removed) {
1933 dev_warn(&h->pdev->dev, "out of memory in "
1934 "adjust_hpsa_scsi_table\n");
1935 goto free_and_out;
1936 }
1937
1938 spin_lock_irqsave(&h->devlock, flags);
1939
1940
1941
1942
1943
1944
1945
1946
1947 i = 0;
1948 nremoved = 0;
1949 nadded = 0;
1950 while (i < h->ndevices) {
1951 csd = h->dev[i];
1952 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1953 if (device_change == DEVICE_NOT_FOUND) {
1954 changes++;
1955 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1956 continue;
1957 } else if (device_change == DEVICE_CHANGED) {
1958 changes++;
1959 hpsa_scsi_replace_entry(h, i, sd[entry],
1960 added, &nadded, removed, &nremoved);
1961
1962
1963
1964 sd[entry] = NULL;
1965 } else if (device_change == DEVICE_UPDATED) {
1966 hpsa_scsi_update_entry(h, i, sd[entry]);
1967 }
1968 i++;
1969 }
1970
1971
1972
1973
1974
1975 for (i = 0; i < nsds; i++) {
1976 if (!sd[i])
1977 continue;
1978
1979
1980
1981
1982
1983
1984 if (sd[i]->volume_offline) {
1985 hpsa_show_volume_status(h, sd[i]);
1986 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1987 continue;
1988 }
1989
1990 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1991 h->ndevices, &entry);
1992 if (device_change == DEVICE_NOT_FOUND) {
1993 changes++;
1994 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1995 break;
1996 sd[i] = NULL;
1997 } else if (device_change == DEVICE_CHANGED) {
1998
1999 changes++;
2000 dev_warn(&h->pdev->dev,
2001 "device unexpectedly changed.\n");
2002
2003 }
2004 }
2005 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015 for (i = 0; i < h->ndevices; i++) {
2016 if (h->dev[i] == NULL)
2017 continue;
2018 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
2019 }
2020
2021 spin_unlock_irqrestore(&h->devlock, flags);
2022
2023
2024
2025
2026
2027 for (i = 0; i < nsds; i++) {
2028 if (!sd[i])
2029 continue;
2030 if (sd[i]->volume_offline)
2031 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
2032 }
2033
2034
2035
2036
2037
2038 if (!changes)
2039 goto free_and_out;
2040
2041
2042 for (i = 0; i < nremoved; i++) {
2043 if (removed[i] == NULL)
2044 continue;
2045 if (removed[i]->expose_device)
2046 hpsa_remove_device(h, removed[i]);
2047 kfree(removed[i]);
2048 removed[i] = NULL;
2049 }
2050
2051
2052 for (i = 0; i < nadded; i++) {
2053 int rc = 0;
2054
2055 if (added[i] == NULL)
2056 continue;
2057 if (!(added[i]->expose_device))
2058 continue;
2059 rc = hpsa_add_device(h, added[i]);
2060 if (!rc)
2061 continue;
2062 dev_warn(&h->pdev->dev,
2063 "addition failed %d, device not added.", rc);
2064
2065
2066
2067 fixup_botched_add(h, added[i]);
2068 h->drv_req_rescan = 1;
2069 }
2070
2071 free_and_out:
2072 kfree(added);
2073 kfree(removed);
2074 }
2075
2076
2077
2078
2079
2080 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2081 int bus, int target, int lun)
2082 {
2083 int i;
2084 struct hpsa_scsi_dev_t *sd;
2085
2086 for (i = 0; i < h->ndevices; i++) {
2087 sd = h->dev[i];
2088 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2089 return sd;
2090 }
2091 return NULL;
2092 }
2093
2094 static int hpsa_slave_alloc(struct scsi_device *sdev)
2095 {
2096 struct hpsa_scsi_dev_t *sd = NULL;
2097 unsigned long flags;
2098 struct ctlr_info *h;
2099
2100 h = sdev_to_hba(sdev);
2101 spin_lock_irqsave(&h->devlock, flags);
2102 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2103 struct scsi_target *starget;
2104 struct sas_rphy *rphy;
2105
2106 starget = scsi_target(sdev);
2107 rphy = target_to_rphy(starget);
2108 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2109 if (sd) {
2110 sd->target = sdev_id(sdev);
2111 sd->lun = sdev->lun;
2112 }
2113 }
2114 if (!sd)
2115 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2116 sdev_id(sdev), sdev->lun);
2117
2118 if (sd && sd->expose_device) {
2119 atomic_set(&sd->ioaccel_cmds_out, 0);
2120 sdev->hostdata = sd;
2121 } else
2122 sdev->hostdata = NULL;
2123 spin_unlock_irqrestore(&h->devlock, flags);
2124 return 0;
2125 }
2126
2127
2128 static int hpsa_slave_configure(struct scsi_device *sdev)
2129 {
2130 struct hpsa_scsi_dev_t *sd;
2131 int queue_depth;
2132
2133 sd = sdev->hostdata;
2134 sdev->no_uld_attach = !sd || !sd->expose_device;
2135
2136 if (sd) {
2137 sd->was_removed = 0;
2138 if (sd->external) {
2139 queue_depth = EXTERNAL_QD;
2140 sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
2141 blk_queue_rq_timeout(sdev->request_queue,
2142 HPSA_EH_PTRAID_TIMEOUT);
2143 } else {
2144 queue_depth = sd->queue_depth != 0 ?
2145 sd->queue_depth : sdev->host->can_queue;
2146 }
2147 } else
2148 queue_depth = sdev->host->can_queue;
2149
2150 scsi_change_queue_depth(sdev, queue_depth);
2151
2152 return 0;
2153 }
2154
2155 static void hpsa_slave_destroy(struct scsi_device *sdev)
2156 {
2157 struct hpsa_scsi_dev_t *hdev = NULL;
2158
2159 hdev = sdev->hostdata;
2160
2161 if (hdev)
2162 hdev->was_removed = 1;
2163 }
2164
2165 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2166 {
2167 int i;
2168
2169 if (!h->ioaccel2_cmd_sg_list)
2170 return;
2171 for (i = 0; i < h->nr_cmds; i++) {
2172 kfree(h->ioaccel2_cmd_sg_list[i]);
2173 h->ioaccel2_cmd_sg_list[i] = NULL;
2174 }
2175 kfree(h->ioaccel2_cmd_sg_list);
2176 h->ioaccel2_cmd_sg_list = NULL;
2177 }
2178
2179 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2180 {
2181 int i;
2182
2183 if (h->chainsize <= 0)
2184 return 0;
2185
2186 h->ioaccel2_cmd_sg_list =
2187 kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
2188 GFP_KERNEL);
2189 if (!h->ioaccel2_cmd_sg_list)
2190 return -ENOMEM;
2191 for (i = 0; i < h->nr_cmds; i++) {
2192 h->ioaccel2_cmd_sg_list[i] =
2193 kmalloc_array(h->maxsgentries,
2194 sizeof(*h->ioaccel2_cmd_sg_list[i]),
2195 GFP_KERNEL);
2196 if (!h->ioaccel2_cmd_sg_list[i])
2197 goto clean;
2198 }
2199 return 0;
2200
2201 clean:
2202 hpsa_free_ioaccel2_sg_chain_blocks(h);
2203 return -ENOMEM;
2204 }
2205
2206 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2207 {
2208 int i;
2209
2210 if (!h->cmd_sg_list)
2211 return;
2212 for (i = 0; i < h->nr_cmds; i++) {
2213 kfree(h->cmd_sg_list[i]);
2214 h->cmd_sg_list[i] = NULL;
2215 }
2216 kfree(h->cmd_sg_list);
2217 h->cmd_sg_list = NULL;
2218 }
2219
2220 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2221 {
2222 int i;
2223
2224 if (h->chainsize <= 0)
2225 return 0;
2226
2227 h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
2228 GFP_KERNEL);
2229 if (!h->cmd_sg_list)
2230 return -ENOMEM;
2231
2232 for (i = 0; i < h->nr_cmds; i++) {
2233 h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
2234 sizeof(*h->cmd_sg_list[i]),
2235 GFP_KERNEL);
2236 if (!h->cmd_sg_list[i])
2237 goto clean;
2238
2239 }
2240 return 0;
2241
2242 clean:
2243 hpsa_free_sg_chain_blocks(h);
2244 return -ENOMEM;
2245 }
2246
2247 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2248 struct io_accel2_cmd *cp, struct CommandList *c)
2249 {
2250 struct ioaccel2_sg_element *chain_block;
2251 u64 temp64;
2252 u32 chain_size;
2253
2254 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2255 chain_size = le32_to_cpu(cp->sg[0].length);
2256 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
2257 DMA_TO_DEVICE);
2258 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2259
2260 cp->sg->address = 0;
2261 return -1;
2262 }
2263 cp->sg->address = cpu_to_le64(temp64);
2264 return 0;
2265 }
2266
2267 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2268 struct io_accel2_cmd *cp)
2269 {
2270 struct ioaccel2_sg_element *chain_sg;
2271 u64 temp64;
2272 u32 chain_size;
2273
2274 chain_sg = cp->sg;
2275 temp64 = le64_to_cpu(chain_sg->address);
2276 chain_size = le32_to_cpu(cp->sg[0].length);
2277 dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
2278 }
2279
2280 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2281 struct CommandList *c)
2282 {
2283 struct SGDescriptor *chain_sg, *chain_block;
2284 u64 temp64;
2285 u32 chain_len;
2286
2287 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2288 chain_block = h->cmd_sg_list[c->cmdindex];
2289 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2290 chain_len = sizeof(*chain_sg) *
2291 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2292 chain_sg->Len = cpu_to_le32(chain_len);
2293 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
2294 DMA_TO_DEVICE);
2295 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2296
2297 chain_sg->Addr = cpu_to_le64(0);
2298 return -1;
2299 }
2300 chain_sg->Addr = cpu_to_le64(temp64);
2301 return 0;
2302 }
2303
2304 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2305 struct CommandList *c)
2306 {
2307 struct SGDescriptor *chain_sg;
2308
2309 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2310 return;
2311
2312 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2313 dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
2314 le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
2315 }
2316
2317
2318
2319
2320
2321
2322 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2323 struct CommandList *c,
2324 struct scsi_cmnd *cmd,
2325 struct io_accel2_cmd *c2,
2326 struct hpsa_scsi_dev_t *dev)
2327 {
2328 int data_len;
2329 int retry = 0;
2330 u32 ioaccel2_resid = 0;
2331
2332 switch (c2->error_data.serv_response) {
2333 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2334 switch (c2->error_data.status) {
2335 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2336 if (cmd)
2337 cmd->result = 0;
2338 break;
2339 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2340 cmd->result |= SAM_STAT_CHECK_CONDITION;
2341 if (c2->error_data.data_present !=
2342 IOACCEL2_SENSE_DATA_PRESENT) {
2343 memset(cmd->sense_buffer, 0,
2344 SCSI_SENSE_BUFFERSIZE);
2345 break;
2346 }
2347
2348 data_len = c2->error_data.sense_data_len;
2349 if (data_len > SCSI_SENSE_BUFFERSIZE)
2350 data_len = SCSI_SENSE_BUFFERSIZE;
2351 if (data_len > sizeof(c2->error_data.sense_data_buff))
2352 data_len =
2353 sizeof(c2->error_data.sense_data_buff);
2354 memcpy(cmd->sense_buffer,
2355 c2->error_data.sense_data_buff, data_len);
2356 retry = 1;
2357 break;
2358 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2359 retry = 1;
2360 break;
2361 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2362 retry = 1;
2363 break;
2364 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2365 retry = 1;
2366 break;
2367 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2368 retry = 1;
2369 break;
2370 default:
2371 retry = 1;
2372 break;
2373 }
2374 break;
2375 case IOACCEL2_SERV_RESPONSE_FAILURE:
2376 switch (c2->error_data.status) {
2377 case IOACCEL2_STATUS_SR_IO_ERROR:
2378 case IOACCEL2_STATUS_SR_IO_ABORTED:
2379 case IOACCEL2_STATUS_SR_OVERRUN:
2380 retry = 1;
2381 break;
2382 case IOACCEL2_STATUS_SR_UNDERRUN:
2383 cmd->result = (DID_OK << 16);
2384 cmd->result |= (COMMAND_COMPLETE << 8);
2385 ioaccel2_resid = get_unaligned_le32(
2386 &c2->error_data.resid_cnt[0]);
2387 scsi_set_resid(cmd, ioaccel2_resid);
2388 break;
2389 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2390 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2391 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2392
2393
2394
2395
2396
2397
2398
2399
2400 if (dev->physical_device && dev->expose_device) {
2401 cmd->result = DID_NO_CONNECT << 16;
2402 dev->removed = 1;
2403 h->drv_req_rescan = 1;
2404 dev_warn(&h->pdev->dev,
2405 "%s: device is gone!\n", __func__);
2406 } else
2407
2408
2409
2410
2411
2412 retry = 1;
2413 break;
2414 default:
2415 retry = 1;
2416 }
2417 break;
2418 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2419 break;
2420 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2421 break;
2422 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2423 retry = 1;
2424 break;
2425 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2426 break;
2427 default:
2428 retry = 1;
2429 break;
2430 }
2431
2432 if (dev->in_reset)
2433 retry = 0;
2434
2435 return retry;
2436 }
2437
2438 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2439 struct CommandList *c)
2440 {
2441 struct hpsa_scsi_dev_t *dev = c->device;
2442
2443
2444
2445
2446
2447
2448 c->scsi_cmd = SCSI_CMD_IDLE;
2449 mb();
2450 if (dev) {
2451 atomic_dec(&dev->commands_outstanding);
2452 if (dev->in_reset &&
2453 atomic_read(&dev->commands_outstanding) <= 0)
2454 wake_up_all(&h->event_sync_wait_queue);
2455 }
2456 }
2457
2458 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2459 struct CommandList *c)
2460 {
2461 hpsa_cmd_resolve_events(h, c);
2462 cmd_tagged_free(h, c);
2463 }
2464
2465 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2466 struct CommandList *c, struct scsi_cmnd *cmd)
2467 {
2468 hpsa_cmd_resolve_and_free(h, c);
2469 if (cmd && cmd->scsi_done)
2470 cmd->scsi_done(cmd);
2471 }
2472
2473 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2474 {
2475 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2476 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2477 }
2478
2479 static void process_ioaccel2_completion(struct ctlr_info *h,
2480 struct CommandList *c, struct scsi_cmnd *cmd,
2481 struct hpsa_scsi_dev_t *dev)
2482 {
2483 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2484
2485
2486 if (likely(c2->error_data.serv_response == 0 &&
2487 c2->error_data.status == 0)) {
2488 cmd->result = 0;
2489 return hpsa_cmd_free_and_done(h, c, cmd);
2490 }
2491
2492
2493
2494
2495
2496
2497 if (is_logical_device(dev) &&
2498 c2->error_data.serv_response ==
2499 IOACCEL2_SERV_RESPONSE_FAILURE) {
2500 if (c2->error_data.status ==
2501 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2502 dev->offload_enabled = 0;
2503 dev->offload_to_be_enabled = 0;
2504 }
2505
2506 if (dev->in_reset) {
2507 cmd->result = DID_RESET << 16;
2508 return hpsa_cmd_free_and_done(h, c, cmd);
2509 }
2510
2511 return hpsa_retry_cmd(h, c);
2512 }
2513
2514 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2515 return hpsa_retry_cmd(h, c);
2516
2517 return hpsa_cmd_free_and_done(h, c, cmd);
2518 }
2519
2520
2521 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2522 struct CommandList *cp)
2523 {
2524 u8 tmf_status = cp->err_info->ScsiStatus;
2525
2526 switch (tmf_status) {
2527 case CISS_TMF_COMPLETE:
2528
2529
2530
2531
2532 case CISS_TMF_SUCCESS:
2533 return 0;
2534 case CISS_TMF_INVALID_FRAME:
2535 case CISS_TMF_NOT_SUPPORTED:
2536 case CISS_TMF_FAILED:
2537 case CISS_TMF_WRONG_LUN:
2538 case CISS_TMF_OVERLAPPED_TAG:
2539 break;
2540 default:
2541 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2542 tmf_status);
2543 break;
2544 }
2545 return -tmf_status;
2546 }
2547
2548 static void complete_scsi_command(struct CommandList *cp)
2549 {
2550 struct scsi_cmnd *cmd;
2551 struct ctlr_info *h;
2552 struct ErrorInfo *ei;
2553 struct hpsa_scsi_dev_t *dev;
2554 struct io_accel2_cmd *c2;
2555
2556 u8 sense_key;
2557 u8 asc;
2558 u8 ascq;
2559 unsigned long sense_data_size;
2560
2561 ei = cp->err_info;
2562 cmd = cp->scsi_cmd;
2563 h = cp->h;
2564
2565 if (!cmd->device) {
2566 cmd->result = DID_NO_CONNECT << 16;
2567 return hpsa_cmd_free_and_done(h, cp, cmd);
2568 }
2569
2570 dev = cmd->device->hostdata;
2571 if (!dev) {
2572 cmd->result = DID_NO_CONNECT << 16;
2573 return hpsa_cmd_free_and_done(h, cp, cmd);
2574 }
2575 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2576
2577 scsi_dma_unmap(cmd);
2578 if ((cp->cmd_type == CMD_SCSI) &&
2579 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2580 hpsa_unmap_sg_chain_block(h, cp);
2581
2582 if ((cp->cmd_type == CMD_IOACCEL2) &&
2583 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2584 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2585
2586 cmd->result = (DID_OK << 16);
2587 cmd->result |= (COMMAND_COMPLETE << 8);
2588
2589
2590 if (dev->was_removed) {
2591 hpsa_cmd_resolve_and_free(h, cp);
2592 return;
2593 }
2594
2595 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2596 if (dev->physical_device && dev->expose_device &&
2597 dev->removed) {
2598 cmd->result = DID_NO_CONNECT << 16;
2599 return hpsa_cmd_free_and_done(h, cp, cmd);
2600 }
2601 if (likely(cp->phys_disk != NULL))
2602 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2603 }
2604
2605
2606
2607
2608
2609
2610 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2611
2612 cmd->result = DID_NO_CONNECT << 16;
2613 return hpsa_cmd_free_and_done(h, cp, cmd);
2614 }
2615
2616 if (cp->cmd_type == CMD_IOACCEL2)
2617 return process_ioaccel2_completion(h, cp, cmd, dev);
2618
2619 scsi_set_resid(cmd, ei->ResidualCnt);
2620 if (ei->CommandStatus == 0)
2621 return hpsa_cmd_free_and_done(h, cp, cmd);
2622
2623
2624
2625
2626 if (cp->cmd_type == CMD_IOACCEL1) {
2627 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2628 cp->Header.SGList = scsi_sg_count(cmd);
2629 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2630 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2631 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2632 cp->Header.tag = c->tag;
2633 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2634 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2635
2636
2637
2638
2639
2640 if (is_logical_device(dev)) {
2641 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2642 dev->offload_enabled = 0;
2643 return hpsa_retry_cmd(h, cp);
2644 }
2645 }
2646
2647
2648 switch (ei->CommandStatus) {
2649
2650 case CMD_TARGET_STATUS:
2651 cmd->result |= ei->ScsiStatus;
2652
2653 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2654 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2655 else
2656 sense_data_size = sizeof(ei->SenseInfo);
2657 if (ei->SenseLen < sense_data_size)
2658 sense_data_size = ei->SenseLen;
2659 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2660 if (ei->ScsiStatus)
2661 decode_sense_data(ei->SenseInfo, sense_data_size,
2662 &sense_key, &asc, &ascq);
2663 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2664 switch (sense_key) {
2665 case ABORTED_COMMAND:
2666 cmd->result |= DID_SOFT_ERROR << 16;
2667 break;
2668 case UNIT_ATTENTION:
2669 if (asc == 0x3F && ascq == 0x0E)
2670 h->drv_req_rescan = 1;
2671 break;
2672 case ILLEGAL_REQUEST:
2673 if (asc == 0x25 && ascq == 0x00) {
2674 dev->removed = 1;
2675 cmd->result = DID_NO_CONNECT << 16;
2676 }
2677 break;
2678 }
2679 break;
2680 }
2681
2682
2683
2684 if (ei->ScsiStatus) {
2685 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2686 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2687 "Returning result: 0x%x\n",
2688 cp, ei->ScsiStatus,
2689 sense_key, asc, ascq,
2690 cmd->result);
2691 } else {
2692 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2693 "Returning no connection.\n", cp),
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707 cmd->result = DID_NO_CONNECT << 16;
2708 }
2709 break;
2710
2711 case CMD_DATA_UNDERRUN:
2712 break;
2713 case CMD_DATA_OVERRUN:
2714 dev_warn(&h->pdev->dev,
2715 "CDB %16phN data overrun\n", cp->Request.CDB);
2716 break;
2717 case CMD_INVALID: {
2718
2719
2720
2721
2722
2723
2724
2725
2726 cmd->result = DID_NO_CONNECT << 16;
2727 }
2728 break;
2729 case CMD_PROTOCOL_ERR:
2730 cmd->result = DID_ERROR << 16;
2731 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2732 cp->Request.CDB);
2733 break;
2734 case CMD_HARDWARE_ERR:
2735 cmd->result = DID_ERROR << 16;
2736 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2737 cp->Request.CDB);
2738 break;
2739 case CMD_CONNECTION_LOST:
2740 cmd->result = DID_ERROR << 16;
2741 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2742 cp->Request.CDB);
2743 break;
2744 case CMD_ABORTED:
2745 cmd->result = DID_ABORT << 16;
2746 break;
2747 case CMD_ABORT_FAILED:
2748 cmd->result = DID_ERROR << 16;
2749 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2750 cp->Request.CDB);
2751 break;
2752 case CMD_UNSOLICITED_ABORT:
2753 cmd->result = DID_SOFT_ERROR << 16;
2754 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2755 cp->Request.CDB);
2756 break;
2757 case CMD_TIMEOUT:
2758 cmd->result = DID_TIME_OUT << 16;
2759 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2760 cp->Request.CDB);
2761 break;
2762 case CMD_UNABORTABLE:
2763 cmd->result = DID_ERROR << 16;
2764 dev_warn(&h->pdev->dev, "Command unabortable\n");
2765 break;
2766 case CMD_TMF_STATUS:
2767 if (hpsa_evaluate_tmf_status(h, cp))
2768 cmd->result = DID_ERROR << 16;
2769 break;
2770 case CMD_IOACCEL_DISABLED:
2771
2772
2773
2774 cmd->result = DID_SOFT_ERROR << 16;
2775 dev_warn(&h->pdev->dev,
2776 "cp %p had HP SSD Smart Path error\n", cp);
2777 break;
2778 default:
2779 cmd->result = DID_ERROR << 16;
2780 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2781 cp, ei->CommandStatus);
2782 }
2783
2784 return hpsa_cmd_free_and_done(h, cp, cmd);
2785 }
2786
2787 static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
2788 int sg_used, enum dma_data_direction data_direction)
2789 {
2790 int i;
2791
2792 for (i = 0; i < sg_used; i++)
2793 dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
2794 le32_to_cpu(c->SG[i].Len),
2795 data_direction);
2796 }
2797
2798 static int hpsa_map_one(struct pci_dev *pdev,
2799 struct CommandList *cp,
2800 unsigned char *buf,
2801 size_t buflen,
2802 enum dma_data_direction data_direction)
2803 {
2804 u64 addr64;
2805
2806 if (buflen == 0 || data_direction == DMA_NONE) {
2807 cp->Header.SGList = 0;
2808 cp->Header.SGTotal = cpu_to_le16(0);
2809 return 0;
2810 }
2811
2812 addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
2813 if (dma_mapping_error(&pdev->dev, addr64)) {
2814
2815 cp->Header.SGList = 0;
2816 cp->Header.SGTotal = cpu_to_le16(0);
2817 return -1;
2818 }
2819 cp->SG[0].Addr = cpu_to_le64(addr64);
2820 cp->SG[0].Len = cpu_to_le32(buflen);
2821 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST);
2822 cp->Header.SGList = 1;
2823 cp->Header.SGTotal = cpu_to_le16(1);
2824 return 0;
2825 }
2826
2827 #define NO_TIMEOUT ((unsigned long) -1)
2828 #define DEFAULT_TIMEOUT 30000
2829 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2830 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2831 {
2832 DECLARE_COMPLETION_ONSTACK(wait);
2833
2834 c->waiting = &wait;
2835 __enqueue_cmd_and_start_io(h, c, reply_queue);
2836 if (timeout_msecs == NO_TIMEOUT) {
2837
2838 wait_for_completion_io(&wait);
2839 return IO_OK;
2840 }
2841 if (!wait_for_completion_io_timeout(&wait,
2842 msecs_to_jiffies(timeout_msecs))) {
2843 dev_warn(&h->pdev->dev, "Command timed out.\n");
2844 return -ETIMEDOUT;
2845 }
2846 return IO_OK;
2847 }
2848
2849 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2850 int reply_queue, unsigned long timeout_msecs)
2851 {
2852 if (unlikely(lockup_detected(h))) {
2853 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2854 return IO_OK;
2855 }
2856 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2857 }
2858
2859 static u32 lockup_detected(struct ctlr_info *h)
2860 {
2861 int cpu;
2862 u32 rc, *lockup_detected;
2863
2864 cpu = get_cpu();
2865 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2866 rc = *lockup_detected;
2867 put_cpu();
2868 return rc;
2869 }
2870
2871 #define MAX_DRIVER_CMD_RETRIES 25
2872 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2873 struct CommandList *c, enum dma_data_direction data_direction,
2874 unsigned long timeout_msecs)
2875 {
2876 int backoff_time = 10, retry_count = 0;
2877 int rc;
2878
2879 do {
2880 memset(c->err_info, 0, sizeof(*c->err_info));
2881 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2882 timeout_msecs);
2883 if (rc)
2884 break;
2885 retry_count++;
2886 if (retry_count > 3) {
2887 msleep(backoff_time);
2888 if (backoff_time < 1000)
2889 backoff_time *= 2;
2890 }
2891 } while ((check_for_unit_attention(h, c) ||
2892 check_for_busy(h, c)) &&
2893 retry_count <= MAX_DRIVER_CMD_RETRIES);
2894 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2895 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2896 rc = -EIO;
2897 return rc;
2898 }
2899
2900 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2901 struct CommandList *c)
2902 {
2903 const u8 *cdb = c->Request.CDB;
2904 const u8 *lun = c->Header.LUN.LunAddrBytes;
2905
2906 dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2907 txt, lun, cdb);
2908 }
2909
2910 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2911 struct CommandList *cp)
2912 {
2913 const struct ErrorInfo *ei = cp->err_info;
2914 struct device *d = &cp->h->pdev->dev;
2915 u8 sense_key, asc, ascq;
2916 int sense_len;
2917
2918 switch (ei->CommandStatus) {
2919 case CMD_TARGET_STATUS:
2920 if (ei->SenseLen > sizeof(ei->SenseInfo))
2921 sense_len = sizeof(ei->SenseInfo);
2922 else
2923 sense_len = ei->SenseLen;
2924 decode_sense_data(ei->SenseInfo, sense_len,
2925 &sense_key, &asc, &ascq);
2926 hpsa_print_cmd(h, "SCSI status", cp);
2927 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2928 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2929 sense_key, asc, ascq);
2930 else
2931 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2932 if (ei->ScsiStatus == 0)
2933 dev_warn(d, "SCSI status is abnormally zero. "
2934 "(probably indicates selection timeout "
2935 "reported incorrectly due to a known "
2936 "firmware bug, circa July, 2001.)\n");
2937 break;
2938 case CMD_DATA_UNDERRUN:
2939 break;
2940 case CMD_DATA_OVERRUN:
2941 hpsa_print_cmd(h, "overrun condition", cp);
2942 break;
2943 case CMD_INVALID: {
2944
2945
2946
2947 hpsa_print_cmd(h, "invalid command", cp);
2948 dev_warn(d, "probably means device no longer present\n");
2949 }
2950 break;
2951 case CMD_PROTOCOL_ERR:
2952 hpsa_print_cmd(h, "protocol error", cp);
2953 break;
2954 case CMD_HARDWARE_ERR:
2955 hpsa_print_cmd(h, "hardware error", cp);
2956 break;
2957 case CMD_CONNECTION_LOST:
2958 hpsa_print_cmd(h, "connection lost", cp);
2959 break;
2960 case CMD_ABORTED:
2961 hpsa_print_cmd(h, "aborted", cp);
2962 break;
2963 case CMD_ABORT_FAILED:
2964 hpsa_print_cmd(h, "abort failed", cp);
2965 break;
2966 case CMD_UNSOLICITED_ABORT:
2967 hpsa_print_cmd(h, "unsolicited abort", cp);
2968 break;
2969 case CMD_TIMEOUT:
2970 hpsa_print_cmd(h, "timed out", cp);
2971 break;
2972 case CMD_UNABORTABLE:
2973 hpsa_print_cmd(h, "unabortable", cp);
2974 break;
2975 case CMD_CTLR_LOCKUP:
2976 hpsa_print_cmd(h, "controller lockup detected", cp);
2977 break;
2978 default:
2979 hpsa_print_cmd(h, "unknown status", cp);
2980 dev_warn(d, "Unknown command status %x\n",
2981 ei->CommandStatus);
2982 }
2983 }
2984
2985 static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
2986 u8 page, u8 *buf, size_t bufsize)
2987 {
2988 int rc = IO_OK;
2989 struct CommandList *c;
2990 struct ErrorInfo *ei;
2991
2992 c = cmd_alloc(h);
2993 if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
2994 page, scsi3addr, TYPE_CMD)) {
2995 rc = -1;
2996 goto out;
2997 }
2998 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
2999 NO_TIMEOUT);
3000 if (rc)
3001 goto out;
3002 ei = c->err_info;
3003 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3004 hpsa_scsi_interpret_error(h, c);
3005 rc = -1;
3006 }
3007 out:
3008 cmd_free(h, c);
3009 return rc;
3010 }
3011
3012 static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
3013 u8 *scsi3addr)
3014 {
3015 u8 *buf;
3016 u64 sa = 0;
3017 int rc = 0;
3018
3019 buf = kzalloc(1024, GFP_KERNEL);
3020 if (!buf)
3021 return 0;
3022
3023 rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
3024 buf, 1024);
3025
3026 if (rc)
3027 goto out;
3028
3029 sa = get_unaligned_be64(buf+12);
3030
3031 out:
3032 kfree(buf);
3033 return sa;
3034 }
3035
3036 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
3037 u16 page, unsigned char *buf,
3038 unsigned char bufsize)
3039 {
3040 int rc = IO_OK;
3041 struct CommandList *c;
3042 struct ErrorInfo *ei;
3043
3044 c = cmd_alloc(h);
3045
3046 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
3047 page, scsi3addr, TYPE_CMD)) {
3048 rc = -1;
3049 goto out;
3050 }
3051 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3052 NO_TIMEOUT);
3053 if (rc)
3054 goto out;
3055 ei = c->err_info;
3056 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3057 hpsa_scsi_interpret_error(h, c);
3058 rc = -1;
3059 }
3060 out:
3061 cmd_free(h, c);
3062 return rc;
3063 }
3064
3065 static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3066 u8 reset_type, int reply_queue)
3067 {
3068 int rc = IO_OK;
3069 struct CommandList *c;
3070 struct ErrorInfo *ei;
3071
3072 c = cmd_alloc(h);
3073 c->device = dev;
3074
3075
3076 (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
3077 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
3078 if (rc) {
3079 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
3080 goto out;
3081 }
3082
3083
3084 ei = c->err_info;
3085 if (ei->CommandStatus != 0) {
3086 hpsa_scsi_interpret_error(h, c);
3087 rc = -1;
3088 }
3089 out:
3090 cmd_free(h, c);
3091 return rc;
3092 }
3093
3094 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
3095 struct hpsa_scsi_dev_t *dev,
3096 unsigned char *scsi3addr)
3097 {
3098 int i;
3099 bool match = false;
3100 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
3101 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
3102
3103 if (hpsa_is_cmd_idle(c))
3104 return false;
3105
3106 switch (c->cmd_type) {
3107 case CMD_SCSI:
3108 case CMD_IOCTL_PEND:
3109 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
3110 sizeof(c->Header.LUN.LunAddrBytes));
3111 break;
3112
3113 case CMD_IOACCEL1:
3114 case CMD_IOACCEL2:
3115 if (c->phys_disk == dev) {
3116
3117 match = true;
3118 } else {
3119
3120
3121
3122
3123 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3124
3125
3126
3127
3128 match = dev->phys_disk[i] == c->phys_disk;
3129 }
3130 }
3131 break;
3132
3133 case IOACCEL2_TMF:
3134 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3135 match = dev->phys_disk[i]->ioaccel_handle ==
3136 le32_to_cpu(ac->it_nexus);
3137 }
3138 break;
3139
3140 case 0:
3141 match = false;
3142 break;
3143
3144 default:
3145 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3146 c->cmd_type);
3147 BUG();
3148 }
3149
3150 return match;
3151 }
3152
3153 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3154 u8 reset_type, int reply_queue)
3155 {
3156 int rc = 0;
3157
3158
3159 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3160 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3161 return -EINTR;
3162 }
3163
3164 rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
3165 if (!rc) {
3166
3167 atomic_dec(&dev->commands_outstanding);
3168 wait_event(h->event_sync_wait_queue,
3169 atomic_read(&dev->commands_outstanding) <= 0 ||
3170 lockup_detected(h));
3171 }
3172
3173 if (unlikely(lockup_detected(h))) {
3174 dev_warn(&h->pdev->dev,
3175 "Controller lockup detected during reset wait\n");
3176 rc = -ENODEV;
3177 }
3178
3179 if (!rc)
3180 rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
3181
3182 mutex_unlock(&h->reset_mutex);
3183 return rc;
3184 }
3185
3186 static void hpsa_get_raid_level(struct ctlr_info *h,
3187 unsigned char *scsi3addr, unsigned char *raid_level)
3188 {
3189 int rc;
3190 unsigned char *buf;
3191
3192 *raid_level = RAID_UNKNOWN;
3193 buf = kzalloc(64, GFP_KERNEL);
3194 if (!buf)
3195 return;
3196
3197 if (!hpsa_vpd_page_supported(h, scsi3addr,
3198 HPSA_VPD_LV_DEVICE_GEOMETRY))
3199 goto exit;
3200
3201 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3202 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3203
3204 if (rc == 0)
3205 *raid_level = buf[8];
3206 if (*raid_level > RAID_UNKNOWN)
3207 *raid_level = RAID_UNKNOWN;
3208 exit:
3209 kfree(buf);
3210 return;
3211 }
3212
3213 #define HPSA_MAP_DEBUG
3214 #ifdef HPSA_MAP_DEBUG
3215 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3216 struct raid_map_data *map_buff)
3217 {
3218 struct raid_map_disk_data *dd = &map_buff->data[0];
3219 int map, row, col;
3220 u16 map_cnt, row_cnt, disks_per_row;
3221
3222 if (rc != 0)
3223 return;
3224
3225
3226 if (h->raid_offload_debug < 2)
3227 return;
3228
3229 dev_info(&h->pdev->dev, "structure_size = %u\n",
3230 le32_to_cpu(map_buff->structure_size));
3231 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3232 le32_to_cpu(map_buff->volume_blk_size));
3233 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3234 le64_to_cpu(map_buff->volume_blk_cnt));
3235 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3236 map_buff->phys_blk_shift);
3237 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3238 map_buff->parity_rotation_shift);
3239 dev_info(&h->pdev->dev, "strip_size = %u\n",
3240 le16_to_cpu(map_buff->strip_size));
3241 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3242 le64_to_cpu(map_buff->disk_starting_blk));
3243 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3244 le64_to_cpu(map_buff->disk_blk_cnt));
3245 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3246 le16_to_cpu(map_buff->data_disks_per_row));
3247 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3248 le16_to_cpu(map_buff->metadata_disks_per_row));
3249 dev_info(&h->pdev->dev, "row_cnt = %u\n",
3250 le16_to_cpu(map_buff->row_cnt));
3251 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3252 le16_to_cpu(map_buff->layout_map_count));
3253 dev_info(&h->pdev->dev, "flags = 0x%x\n",
3254 le16_to_cpu(map_buff->flags));
3255 dev_info(&h->pdev->dev, "encryption = %s\n",
3256 le16_to_cpu(map_buff->flags) &
3257 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
3258 dev_info(&h->pdev->dev, "dekindex = %u\n",
3259 le16_to_cpu(map_buff->dekindex));
3260 map_cnt = le16_to_cpu(map_buff->layout_map_count);
3261 for (map = 0; map < map_cnt; map++) {
3262 dev_info(&h->pdev->dev, "Map%u:\n", map);
3263 row_cnt = le16_to_cpu(map_buff->row_cnt);
3264 for (row = 0; row < row_cnt; row++) {
3265 dev_info(&h->pdev->dev, " Row%u:\n", row);
3266 disks_per_row =
3267 le16_to_cpu(map_buff->data_disks_per_row);
3268 for (col = 0; col < disks_per_row; col++, dd++)
3269 dev_info(&h->pdev->dev,
3270 " D%02u: h=0x%04x xor=%u,%u\n",
3271 col, dd->ioaccel_handle,
3272 dd->xor_mult[0], dd->xor_mult[1]);
3273 disks_per_row =
3274 le16_to_cpu(map_buff->metadata_disks_per_row);
3275 for (col = 0; col < disks_per_row; col++, dd++)
3276 dev_info(&h->pdev->dev,
3277 " M%02u: h=0x%04x xor=%u,%u\n",
3278 col, dd->ioaccel_handle,
3279 dd->xor_mult[0], dd->xor_mult[1]);
3280 }
3281 }
3282 }
3283 #else
3284 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3285 __attribute__((unused)) int rc,
3286 __attribute__((unused)) struct raid_map_data *map_buff)
3287 {
3288 }
3289 #endif
3290
3291 static int hpsa_get_raid_map(struct ctlr_info *h,
3292 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3293 {
3294 int rc = 0;
3295 struct CommandList *c;
3296 struct ErrorInfo *ei;
3297
3298 c = cmd_alloc(h);
3299
3300 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3301 sizeof(this_device->raid_map), 0,
3302 scsi3addr, TYPE_CMD)) {
3303 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3304 cmd_free(h, c);
3305 return -1;
3306 }
3307 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3308 NO_TIMEOUT);
3309 if (rc)
3310 goto out;
3311 ei = c->err_info;
3312 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3313 hpsa_scsi_interpret_error(h, c);
3314 rc = -1;
3315 goto out;
3316 }
3317 cmd_free(h, c);
3318
3319
3320 if (le32_to_cpu(this_device->raid_map.structure_size) >
3321 sizeof(this_device->raid_map)) {
3322 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3323 rc = -1;
3324 }
3325 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3326 return rc;
3327 out:
3328 cmd_free(h, c);
3329 return rc;
3330 }
3331
3332 static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3333 unsigned char scsi3addr[], u16 bmic_device_index,
3334 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3335 {
3336 int rc = IO_OK;
3337 struct CommandList *c;
3338 struct ErrorInfo *ei;
3339
3340 c = cmd_alloc(h);
3341
3342 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3343 0, RAID_CTLR_LUNID, TYPE_CMD);
3344 if (rc)
3345 goto out;
3346
3347 c->Request.CDB[2] = bmic_device_index & 0xff;
3348 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3349
3350 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3351 NO_TIMEOUT);
3352 if (rc)
3353 goto out;
3354 ei = c->err_info;
3355 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3356 hpsa_scsi_interpret_error(h, c);
3357 rc = -1;
3358 }
3359 out:
3360 cmd_free(h, c);
3361 return rc;
3362 }
3363
3364 static int hpsa_bmic_id_controller(struct ctlr_info *h,
3365 struct bmic_identify_controller *buf, size_t bufsize)
3366 {
3367 int rc = IO_OK;
3368 struct CommandList *c;
3369 struct ErrorInfo *ei;
3370
3371 c = cmd_alloc(h);
3372
3373 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3374 0, RAID_CTLR_LUNID, TYPE_CMD);
3375 if (rc)
3376 goto out;
3377
3378 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3379 NO_TIMEOUT);
3380 if (rc)
3381 goto out;
3382 ei = c->err_info;
3383 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3384 hpsa_scsi_interpret_error(h, c);
3385 rc = -1;
3386 }
3387 out:
3388 cmd_free(h, c);
3389 return rc;
3390 }
3391
3392 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3393 unsigned char scsi3addr[], u16 bmic_device_index,
3394 struct bmic_identify_physical_device *buf, size_t bufsize)
3395 {
3396 int rc = IO_OK;
3397 struct CommandList *c;
3398 struct ErrorInfo *ei;
3399
3400 c = cmd_alloc(h);
3401 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3402 0, RAID_CTLR_LUNID, TYPE_CMD);
3403 if (rc)
3404 goto out;
3405
3406 c->Request.CDB[2] = bmic_device_index & 0xff;
3407 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3408
3409 hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3410 NO_TIMEOUT);
3411 ei = c->err_info;
3412 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3413 hpsa_scsi_interpret_error(h, c);
3414 rc = -1;
3415 }
3416 out:
3417 cmd_free(h, c);
3418
3419 return rc;
3420 }
3421
3422
3423
3424
3425
3426
3427
3428 static void hpsa_get_enclosure_info(struct ctlr_info *h,
3429 unsigned char *scsi3addr,
3430 struct ReportExtendedLUNdata *rlep, int rle_index,
3431 struct hpsa_scsi_dev_t *encl_dev)
3432 {
3433 int rc = -1;
3434 struct CommandList *c = NULL;
3435 struct ErrorInfo *ei = NULL;
3436 struct bmic_sense_storage_box_params *bssbp = NULL;
3437 struct bmic_identify_physical_device *id_phys = NULL;
3438 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3439 u16 bmic_device_index = 0;
3440
3441 encl_dev->eli =
3442 hpsa_get_enclosure_logical_identifier(h, scsi3addr);
3443
3444 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3445
3446 if (encl_dev->target == -1 || encl_dev->lun == -1) {
3447 rc = IO_OK;
3448 goto out;
3449 }
3450
3451 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3452 rc = IO_OK;
3453 goto out;
3454 }
3455
3456 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3457 if (!bssbp)
3458 goto out;
3459
3460 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3461 if (!id_phys)
3462 goto out;
3463
3464 rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3465 id_phys, sizeof(*id_phys));
3466 if (rc) {
3467 dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
3468 __func__, encl_dev->external, bmic_device_index);
3469 goto out;
3470 }
3471
3472 c = cmd_alloc(h);
3473
3474 rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3475 sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3476
3477 if (rc)
3478 goto out;
3479
3480 if (id_phys->phys_connector[1] == 'E')
3481 c->Request.CDB[5] = id_phys->box_index;
3482 else
3483 c->Request.CDB[5] = 0;
3484
3485 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3486 NO_TIMEOUT);
3487 if (rc)
3488 goto out;
3489
3490 ei = c->err_info;
3491 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3492 rc = -1;
3493 goto out;
3494 }
3495
3496 encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3497 memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3498 bssbp->phys_connector, sizeof(bssbp->phys_connector));
3499
3500 rc = IO_OK;
3501 out:
3502 kfree(bssbp);
3503 kfree(id_phys);
3504
3505 if (c)
3506 cmd_free(h, c);
3507
3508 if (rc != IO_OK)
3509 hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3510 "Error, could not get enclosure information");
3511 }
3512
3513 static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3514 unsigned char *scsi3addr)
3515 {
3516 struct ReportExtendedLUNdata *physdev;
3517 u32 nphysicals;
3518 u64 sa = 0;
3519 int i;
3520
3521 physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3522 if (!physdev)
3523 return 0;
3524
3525 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3526 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3527 kfree(physdev);
3528 return 0;
3529 }
3530 nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3531
3532 for (i = 0; i < nphysicals; i++)
3533 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3534 sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3535 break;
3536 }
3537
3538 kfree(physdev);
3539
3540 return sa;
3541 }
3542
3543 static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3544 struct hpsa_scsi_dev_t *dev)
3545 {
3546 int rc;
3547 u64 sa = 0;
3548
3549 if (is_hba_lunid(scsi3addr)) {
3550 struct bmic_sense_subsystem_info *ssi;
3551
3552 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3553 if (!ssi)
3554 return;
3555
3556 rc = hpsa_bmic_sense_subsystem_information(h,
3557 scsi3addr, 0, ssi, sizeof(*ssi));
3558 if (rc == 0) {
3559 sa = get_unaligned_be64(ssi->primary_world_wide_id);
3560 h->sas_address = sa;
3561 }
3562
3563 kfree(ssi);
3564 } else
3565 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3566
3567 dev->sas_address = sa;
3568 }
3569
3570 static void hpsa_ext_ctrl_present(struct ctlr_info *h,
3571 struct ReportExtendedLUNdata *physdev)
3572 {
3573 u32 nphysicals;
3574 int i;
3575
3576 if (h->discovery_polling)
3577 return;
3578
3579 nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
3580
3581 for (i = 0; i < nphysicals; i++) {
3582 if (physdev->LUN[i].device_type ==
3583 BMIC_DEVICE_TYPE_CONTROLLER
3584 && !is_hba_lunid(physdev->LUN[i].lunid)) {
3585 dev_info(&h->pdev->dev,
3586 "External controller present, activate discovery polling and disable rld caching\n");
3587 hpsa_disable_rld_caching(h);
3588 h->discovery_polling = 1;
3589 break;
3590 }
3591 }
3592 }
3593
3594
3595 static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3596 unsigned char scsi3addr[], u8 page)
3597 {
3598 int rc;
3599 int i;
3600 int pages;
3601 unsigned char *buf, bufsize;
3602
3603 buf = kzalloc(256, GFP_KERNEL);
3604 if (!buf)
3605 return false;
3606
3607
3608 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3609 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3610 buf, HPSA_VPD_HEADER_SZ);
3611 if (rc != 0)
3612 goto exit_unsupported;
3613 pages = buf[3];
3614 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3615 bufsize = pages + HPSA_VPD_HEADER_SZ;
3616 else
3617 bufsize = 255;
3618
3619
3620 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3621 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3622 buf, bufsize);
3623 if (rc != 0)
3624 goto exit_unsupported;
3625
3626 pages = buf[3];
3627 for (i = 1; i <= pages; i++)
3628 if (buf[3 + i] == page)
3629 goto exit_supported;
3630 exit_unsupported:
3631 kfree(buf);
3632 return false;
3633 exit_supported:
3634 kfree(buf);
3635 return true;
3636 }
3637
3638
3639
3640
3641
3642
3643
3644
3645 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3646 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3647 {
3648 int rc;
3649 unsigned char *buf;
3650 u8 ioaccel_status;
3651
3652 this_device->offload_config = 0;
3653 this_device->offload_enabled = 0;
3654 this_device->offload_to_be_enabled = 0;
3655
3656 buf = kzalloc(64, GFP_KERNEL);
3657 if (!buf)
3658 return;
3659 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3660 goto out;
3661 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3662 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3663 if (rc != 0)
3664 goto out;
3665
3666 #define IOACCEL_STATUS_BYTE 4
3667 #define OFFLOAD_CONFIGURED_BIT 0x01
3668 #define OFFLOAD_ENABLED_BIT 0x02
3669 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3670 this_device->offload_config =
3671 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3672 if (this_device->offload_config) {
3673 this_device->offload_to_be_enabled =
3674 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3675 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3676 this_device->offload_to_be_enabled = 0;
3677 }
3678
3679 out:
3680 kfree(buf);
3681 return;
3682 }
3683
3684
3685 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3686 unsigned char *device_id, int index, int buflen)
3687 {
3688 int rc;
3689 unsigned char *buf;
3690
3691
3692 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3693 return 1;
3694
3695 buf = kzalloc(64, GFP_KERNEL);
3696 if (!buf)
3697 return -ENOMEM;
3698
3699 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3700 HPSA_VPD_LV_DEVICE_ID, buf, 64);
3701 if (rc == 0) {
3702 if (buflen > 16)
3703 buflen = 16;
3704 memcpy(device_id, &buf[8], buflen);
3705 }
3706
3707 kfree(buf);
3708
3709 return rc;
3710 }
3711
3712 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3713 void *buf, int bufsize,
3714 int extended_response)
3715 {
3716 int rc = IO_OK;
3717 struct CommandList *c;
3718 unsigned char scsi3addr[8];
3719 struct ErrorInfo *ei;
3720
3721 c = cmd_alloc(h);
3722
3723
3724 memset(scsi3addr, 0, sizeof(scsi3addr));
3725 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3726 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3727 rc = -EAGAIN;
3728 goto out;
3729 }
3730 if (extended_response)
3731 c->Request.CDB[1] = extended_response;
3732 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3733 NO_TIMEOUT);
3734 if (rc)
3735 goto out;
3736 ei = c->err_info;
3737 if (ei->CommandStatus != 0 &&
3738 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3739 hpsa_scsi_interpret_error(h, c);
3740 rc = -EIO;
3741 } else {
3742 struct ReportLUNdata *rld = buf;
3743
3744 if (rld->extended_response_flag != extended_response) {
3745 if (!h->legacy_board) {
3746 dev_err(&h->pdev->dev,
3747 "report luns requested format %u, got %u\n",
3748 extended_response,
3749 rld->extended_response_flag);
3750 rc = -EINVAL;
3751 } else
3752 rc = -EOPNOTSUPP;
3753 }
3754 }
3755 out:
3756 cmd_free(h, c);
3757 return rc;
3758 }
3759
3760 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3761 struct ReportExtendedLUNdata *buf, int bufsize)
3762 {
3763 int rc;
3764 struct ReportLUNdata *lbuf;
3765
3766 rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3767 HPSA_REPORT_PHYS_EXTENDED);
3768 if (!rc || rc != -EOPNOTSUPP)
3769 return rc;
3770
3771
3772 lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
3773 if (!lbuf)
3774 return -ENOMEM;
3775
3776 rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
3777 if (!rc) {
3778 int i;
3779 u32 nphys;
3780
3781
3782 memcpy(buf, lbuf, 8);
3783 nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
3784 for (i = 0; i < nphys; i++)
3785 memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
3786 }
3787 kfree(lbuf);
3788 return rc;
3789 }
3790
3791 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3792 struct ReportLUNdata *buf, int bufsize)
3793 {
3794 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3795 }
3796
3797 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3798 int bus, int target, int lun)
3799 {
3800 device->bus = bus;
3801 device->target = target;
3802 device->lun = lun;
3803 }
3804
3805
3806 static int hpsa_get_volume_status(struct ctlr_info *h,
3807 unsigned char scsi3addr[])
3808 {
3809 int rc;
3810 int status;
3811 int size;
3812 unsigned char *buf;
3813
3814 buf = kzalloc(64, GFP_KERNEL);
3815 if (!buf)
3816 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3817
3818
3819 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3820 goto exit_failed;
3821
3822
3823 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3824 buf, HPSA_VPD_HEADER_SZ);
3825 if (rc != 0)
3826 goto exit_failed;
3827 size = buf[3];
3828
3829
3830 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3831 buf, size + HPSA_VPD_HEADER_SZ);
3832 if (rc != 0)
3833 goto exit_failed;
3834 status = buf[4];
3835
3836 kfree(buf);
3837 return status;
3838 exit_failed:
3839 kfree(buf);
3840 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3841 }
3842
3843
3844
3845
3846
3847
3848
3849
3850 static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3851 unsigned char scsi3addr[])
3852 {
3853 struct CommandList *c;
3854 unsigned char *sense;
3855 u8 sense_key, asc, ascq;
3856 int sense_len;
3857 int rc, ldstat = 0;
3858 u16 cmd_status;
3859 u8 scsi_status;
3860 #define ASC_LUN_NOT_READY 0x04
3861 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3862 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3863
3864 c = cmd_alloc(h);
3865
3866 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3867 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3868 NO_TIMEOUT);
3869 if (rc) {
3870 cmd_free(h, c);
3871 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3872 }
3873 sense = c->err_info->SenseInfo;
3874 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3875 sense_len = sizeof(c->err_info->SenseInfo);
3876 else
3877 sense_len = c->err_info->SenseLen;
3878 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3879 cmd_status = c->err_info->CommandStatus;
3880 scsi_status = c->err_info->ScsiStatus;
3881 cmd_free(h, c);
3882
3883
3884 ldstat = hpsa_get_volume_status(h, scsi3addr);
3885
3886
3887 switch (ldstat) {
3888 case HPSA_LV_FAILED:
3889 case HPSA_LV_UNDERGOING_ERASE:
3890 case HPSA_LV_NOT_AVAILABLE:
3891 case HPSA_LV_UNDERGOING_RPI:
3892 case HPSA_LV_PENDING_RPI:
3893 case HPSA_LV_ENCRYPTED_NO_KEY:
3894 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3895 case HPSA_LV_UNDERGOING_ENCRYPTION:
3896 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3897 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3898 return ldstat;
3899 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3900
3901
3902
3903 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3904 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3905 return ldstat;
3906 break;
3907 default:
3908 break;
3909 }
3910 return HPSA_LV_OK;
3911 }
3912
3913 static int hpsa_update_device_info(struct ctlr_info *h,
3914 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3915 unsigned char *is_OBDR_device)
3916 {
3917
3918 #define OBDR_SIG_OFFSET 43
3919 #define OBDR_TAPE_SIG "$DR-10"
3920 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3921 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3922
3923 unsigned char *inq_buff;
3924 unsigned char *obdr_sig;
3925 int rc = 0;
3926
3927 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3928 if (!inq_buff) {
3929 rc = -ENOMEM;
3930 goto bail_out;
3931 }
3932
3933
3934 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3935 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3936 dev_err(&h->pdev->dev,
3937 "%s: inquiry failed, device will be skipped.\n",
3938 __func__);
3939 rc = HPSA_INQUIRY_FAILED;
3940 goto bail_out;
3941 }
3942
3943 scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3944 scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3945
3946 this_device->devtype = (inq_buff[0] & 0x1f);
3947 memcpy(this_device->scsi3addr, scsi3addr, 8);
3948 memcpy(this_device->vendor, &inq_buff[8],
3949 sizeof(this_device->vendor));
3950 memcpy(this_device->model, &inq_buff[16],
3951 sizeof(this_device->model));
3952 this_device->rev = inq_buff[2];
3953 memset(this_device->device_id, 0,
3954 sizeof(this_device->device_id));
3955 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3956 sizeof(this_device->device_id)) < 0) {
3957 dev_err(&h->pdev->dev,
3958 "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
3959 h->ctlr, __func__,
3960 h->scsi_host->host_no,
3961 this_device->bus, this_device->target,
3962 this_device->lun,
3963 scsi_device_type(this_device->devtype),
3964 this_device->model);
3965 rc = HPSA_LV_FAILED;
3966 goto bail_out;
3967 }
3968
3969 if ((this_device->devtype == TYPE_DISK ||
3970 this_device->devtype == TYPE_ZBC) &&
3971 is_logical_dev_addr_mode(scsi3addr)) {
3972 unsigned char volume_offline;
3973
3974 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3975 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3976 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3977 volume_offline = hpsa_volume_offline(h, scsi3addr);
3978 if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
3979 h->legacy_board) {
3980
3981
3982
3983 dev_info(&h->pdev->dev,
3984 "C0:T%d:L%d Volume status not available, assuming online.\n",
3985 this_device->target, this_device->lun);
3986 volume_offline = 0;
3987 }
3988 this_device->volume_offline = volume_offline;
3989 if (volume_offline == HPSA_LV_FAILED) {
3990 rc = HPSA_LV_FAILED;
3991 dev_err(&h->pdev->dev,
3992 "%s: LV failed, device will be skipped.\n",
3993 __func__);
3994 goto bail_out;
3995 }
3996 } else {
3997 this_device->raid_level = RAID_UNKNOWN;
3998 this_device->offload_config = 0;
3999 this_device->offload_enabled = 0;
4000 this_device->offload_to_be_enabled = 0;
4001 this_device->hba_ioaccel_enabled = 0;
4002 this_device->volume_offline = 0;
4003 this_device->queue_depth = h->nr_cmds;
4004 }
4005
4006 if (this_device->external)
4007 this_device->queue_depth = EXTERNAL_QD;
4008
4009 if (is_OBDR_device) {
4010
4011
4012
4013 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
4014 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
4015 strncmp(obdr_sig, OBDR_TAPE_SIG,
4016 OBDR_SIG_LEN) == 0);
4017 }
4018 kfree(inq_buff);
4019 return 0;
4020
4021 bail_out:
4022 kfree(inq_buff);
4023 return rc;
4024 }
4025
4026
4027
4028
4029
4030
4031
4032 static void figure_bus_target_lun(struct ctlr_info *h,
4033 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
4034 {
4035 u32 lunid = get_unaligned_le32(lunaddrbytes);
4036
4037 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
4038
4039 if (is_hba_lunid(lunaddrbytes)) {
4040 int bus = HPSA_HBA_BUS;
4041
4042 if (!device->rev)
4043 bus = HPSA_LEGACY_HBA_BUS;
4044 hpsa_set_bus_target_lun(device,
4045 bus, 0, lunid & 0x3fff);
4046 } else
4047
4048 hpsa_set_bus_target_lun(device,
4049 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
4050 return;
4051 }
4052
4053 if (device->external) {
4054 hpsa_set_bus_target_lun(device,
4055 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
4056 lunid & 0x00ff);
4057 return;
4058 }
4059 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
4060 0, lunid & 0x3fff);
4061 }
4062
4063 static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
4064 int i, int nphysicals, int nlocal_logicals)
4065 {
4066
4067
4068
4069 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4070
4071 if (i == raid_ctlr_position)
4072 return 0;
4073
4074 if (i < logicals_start)
4075 return 0;
4076
4077
4078 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
4079 return 0;
4080
4081 return 1;
4082 }
4083
4084
4085
4086
4087
4088
4089
4090 static int hpsa_gather_lun_info(struct ctlr_info *h,
4091 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
4092 struct ReportLUNdata *logdev, u32 *nlogicals)
4093 {
4094 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
4095 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
4096 return -1;
4097 }
4098 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
4099 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
4100 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4101 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
4102 *nphysicals = HPSA_MAX_PHYS_LUN;
4103 }
4104 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
4105 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
4106 return -1;
4107 }
4108 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
4109
4110 if (*nlogicals > HPSA_MAX_LUN) {
4111 dev_warn(&h->pdev->dev,
4112 "maximum logical LUNs (%d) exceeded. "
4113 "%d LUNs ignored.\n", HPSA_MAX_LUN,
4114 *nlogicals - HPSA_MAX_LUN);
4115 *nlogicals = HPSA_MAX_LUN;
4116 }
4117 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
4118 dev_warn(&h->pdev->dev,
4119 "maximum logical + physical LUNs (%d) exceeded. "
4120 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
4121 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
4122 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
4123 }
4124 return 0;
4125 }
4126
4127 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4128 int i, int nphysicals, int nlogicals,
4129 struct ReportExtendedLUNdata *physdev_list,
4130 struct ReportLUNdata *logdev_list)
4131 {
4132
4133
4134
4135
4136
4137 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4138 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4139
4140 if (i == raid_ctlr_position)
4141 return RAID_CTLR_LUNID;
4142
4143 if (i < logicals_start)
4144 return &physdev_list->LUN[i -
4145 (raid_ctlr_position == 0)].lunid[0];
4146
4147 if (i < last_device)
4148 return &logdev_list->LUN[i - nphysicals -
4149 (raid_ctlr_position == 0)][0];
4150 BUG();
4151 return NULL;
4152 }
4153
4154
4155 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4156 struct hpsa_scsi_dev_t *dev,
4157 struct ReportExtendedLUNdata *rlep, int rle_index,
4158 struct bmic_identify_physical_device *id_phys)
4159 {
4160 int rc;
4161 struct ext_report_lun_entry *rle;
4162
4163 rle = &rlep->LUN[rle_index];
4164
4165 dev->ioaccel_handle = rle->ioaccel_handle;
4166 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4167 dev->hba_ioaccel_enabled = 1;
4168 memset(id_phys, 0, sizeof(*id_phys));
4169 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4170 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4171 sizeof(*id_phys));
4172 if (!rc)
4173
4174 #define DRIVE_CMDS_RESERVED_FOR_FW 2
4175 #define DRIVE_QUEUE_DEPTH 7
4176 dev->queue_depth =
4177 le16_to_cpu(id_phys->current_queue_depth_limit) -
4178 DRIVE_CMDS_RESERVED_FOR_FW;
4179 else
4180 dev->queue_depth = DRIVE_QUEUE_DEPTH;
4181 }
4182
4183 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4184 struct ReportExtendedLUNdata *rlep, int rle_index,
4185 struct bmic_identify_physical_device *id_phys)
4186 {
4187 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
4188
4189 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4190 this_device->hba_ioaccel_enabled = 1;
4191
4192 memcpy(&this_device->active_path_index,
4193 &id_phys->active_path_number,
4194 sizeof(this_device->active_path_index));
4195 memcpy(&this_device->path_map,
4196 &id_phys->redundant_path_present_map,
4197 sizeof(this_device->path_map));
4198 memcpy(&this_device->box,
4199 &id_phys->alternate_paths_phys_box_on_port,
4200 sizeof(this_device->box));
4201 memcpy(&this_device->phys_connector,
4202 &id_phys->alternate_paths_phys_connector,
4203 sizeof(this_device->phys_connector));
4204 memcpy(&this_device->bay,
4205 &id_phys->phys_bay_in_box,
4206 sizeof(this_device->bay));
4207 }
4208
4209
4210 static int hpsa_set_local_logical_count(struct ctlr_info *h,
4211 struct bmic_identify_controller *id_ctlr,
4212 u32 *nlocals)
4213 {
4214 int rc;
4215
4216 if (!id_ctlr) {
4217 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4218 __func__);
4219 return -ENOMEM;
4220 }
4221 memset(id_ctlr, 0, sizeof(*id_ctlr));
4222 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4223 if (!rc)
4224 if (id_ctlr->configured_logical_drive_count < 255)
4225 *nlocals = id_ctlr->configured_logical_drive_count;
4226 else
4227 *nlocals = le16_to_cpu(
4228 id_ctlr->extended_logical_unit_count);
4229 else
4230 *nlocals = -1;
4231 return rc;
4232 }
4233
4234 static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
4235 {
4236 struct bmic_identify_physical_device *id_phys;
4237 bool is_spare = false;
4238 int rc;
4239
4240 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4241 if (!id_phys)
4242 return false;
4243
4244 rc = hpsa_bmic_id_physical_device(h,
4245 lunaddrbytes,
4246 GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
4247 id_phys, sizeof(*id_phys));
4248 if (rc == 0)
4249 is_spare = (id_phys->more_flags >> 6) & 0x01;
4250
4251 kfree(id_phys);
4252 return is_spare;
4253 }
4254
4255 #define RPL_DEV_FLAG_NON_DISK 0x1
4256 #define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
4257 #define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
4258
4259 #define BMIC_DEVICE_TYPE_ENCLOSURE 6
4260
4261 static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
4262 struct ext_report_lun_entry *rle)
4263 {
4264 u8 device_flags;
4265 u8 device_type;
4266
4267 if (!MASKED_DEVICE(lunaddrbytes))
4268 return false;
4269
4270 device_flags = rle->device_flags;
4271 device_type = rle->device_type;
4272
4273 if (device_flags & RPL_DEV_FLAG_NON_DISK) {
4274 if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
4275 return false;
4276 return true;
4277 }
4278
4279 if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
4280 return false;
4281
4282 if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
4283 return false;
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293 if (hpsa_is_disk_spare(h, lunaddrbytes))
4294 return true;
4295
4296 return false;
4297 }
4298
4299 static void hpsa_update_scsi_devices(struct ctlr_info *h)
4300 {
4301
4302
4303
4304
4305
4306
4307
4308
4309
4310
4311 struct ReportExtendedLUNdata *physdev_list = NULL;
4312 struct ReportLUNdata *logdev_list = NULL;
4313 struct bmic_identify_physical_device *id_phys = NULL;
4314 struct bmic_identify_controller *id_ctlr = NULL;
4315 u32 nphysicals = 0;
4316 u32 nlogicals = 0;
4317 u32 nlocal_logicals = 0;
4318 u32 ndev_allocated = 0;
4319 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4320 int ncurrent = 0;
4321 int i, n_ext_target_devs, ndevs_to_allocate;
4322 int raid_ctlr_position;
4323 bool physical_device;
4324 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4325
4326 currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
4327 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4328 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4329 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4330 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4331 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4332
4333 if (!currentsd || !physdev_list || !logdev_list ||
4334 !tmpdevice || !id_phys || !id_ctlr) {
4335 dev_err(&h->pdev->dev, "out of memory\n");
4336 goto out;
4337 }
4338 memset(lunzerobits, 0, sizeof(lunzerobits));
4339
4340 h->drv_req_rescan = 0;
4341
4342 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4343 logdev_list, &nlogicals)) {
4344 h->drv_req_rescan = 1;
4345 goto out;
4346 }
4347
4348
4349 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4350 dev_warn(&h->pdev->dev,
4351 "%s: Can't determine number of local logical devices.\n",
4352 __func__);
4353 }
4354
4355
4356
4357
4358
4359 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4360
4361 hpsa_ext_ctrl_present(h, physdev_list);
4362
4363
4364 for (i = 0; i < ndevs_to_allocate; i++) {
4365 if (i >= HPSA_MAX_DEVICES) {
4366 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4367 " %d devices ignored.\n", HPSA_MAX_DEVICES,
4368 ndevs_to_allocate - HPSA_MAX_DEVICES);
4369 break;
4370 }
4371
4372 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4373 if (!currentsd[i]) {
4374 h->drv_req_rescan = 1;
4375 goto out;
4376 }
4377 ndev_allocated++;
4378 }
4379
4380 if (is_scsi_rev_5(h))
4381 raid_ctlr_position = 0;
4382 else
4383 raid_ctlr_position = nphysicals + nlogicals;
4384
4385
4386 n_ext_target_devs = 0;
4387 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4388 u8 *lunaddrbytes, is_OBDR = 0;
4389 int rc = 0;
4390 int phys_dev_index = i - (raid_ctlr_position == 0);
4391 bool skip_device = false;
4392
4393 memset(tmpdevice, 0, sizeof(*tmpdevice));
4394
4395 physical_device = i < nphysicals + (raid_ctlr_position == 0);
4396
4397
4398 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4399 i, nphysicals, nlogicals, physdev_list, logdev_list);
4400
4401
4402 tmpdevice->external =
4403 figure_external_status(h, raid_ctlr_position, i,
4404 nphysicals, nlocal_logicals);
4405
4406
4407
4408
4409 if (!tmpdevice->external && physical_device) {
4410 skip_device = hpsa_skip_device(h, lunaddrbytes,
4411 &physdev_list->LUN[phys_dev_index]);
4412 if (skip_device)
4413 continue;
4414 }
4415
4416
4417 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4418 &is_OBDR);
4419 if (rc == -ENOMEM) {
4420 dev_warn(&h->pdev->dev,
4421 "Out of memory, rescan deferred.\n");
4422 h->drv_req_rescan = 1;
4423 goto out;
4424 }
4425 if (rc) {
4426 h->drv_req_rescan = 1;
4427 continue;
4428 }
4429
4430 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4431 this_device = currentsd[ncurrent];
4432
4433 *this_device = *tmpdevice;
4434 this_device->physical_device = physical_device;
4435
4436
4437
4438
4439
4440 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4441 this_device->expose_device = 0;
4442 else
4443 this_device->expose_device = 1;
4444
4445
4446
4447
4448
4449 if (this_device->physical_device && this_device->expose_device)
4450 hpsa_get_sas_address(h, lunaddrbytes, this_device);
4451
4452 switch (this_device->devtype) {
4453 case TYPE_ROM:
4454
4455
4456
4457
4458
4459
4460
4461 if (is_OBDR)
4462 ncurrent++;
4463 break;
4464 case TYPE_DISK:
4465 case TYPE_ZBC:
4466 if (this_device->physical_device) {
4467
4468
4469 this_device->offload_enabled = 0;
4470 hpsa_get_ioaccel_drive_info(h, this_device,
4471 physdev_list, phys_dev_index, id_phys);
4472 hpsa_get_path_info(this_device,
4473 physdev_list, phys_dev_index, id_phys);
4474 }
4475 ncurrent++;
4476 break;
4477 case TYPE_TAPE:
4478 case TYPE_MEDIUM_CHANGER:
4479 ncurrent++;
4480 break;
4481 case TYPE_ENCLOSURE:
4482 if (!this_device->external)
4483 hpsa_get_enclosure_info(h, lunaddrbytes,
4484 physdev_list, phys_dev_index,
4485 this_device);
4486 ncurrent++;
4487 break;
4488 case TYPE_RAID:
4489
4490
4491
4492
4493
4494 if (!is_hba_lunid(lunaddrbytes))
4495 break;
4496 ncurrent++;
4497 break;
4498 default:
4499 break;
4500 }
4501 if (ncurrent >= HPSA_MAX_DEVICES)
4502 break;
4503 }
4504
4505 if (h->sas_host == NULL) {
4506 int rc = 0;
4507
4508 rc = hpsa_add_sas_host(h);
4509 if (rc) {
4510 dev_warn(&h->pdev->dev,
4511 "Could not add sas host %d\n", rc);
4512 goto out;
4513 }
4514 }
4515
4516 adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4517 out:
4518 kfree(tmpdevice);
4519 for (i = 0; i < ndev_allocated; i++)
4520 kfree(currentsd[i]);
4521 kfree(currentsd);
4522 kfree(physdev_list);
4523 kfree(logdev_list);
4524 kfree(id_ctlr);
4525 kfree(id_phys);
4526 }
4527
4528 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4529 struct scatterlist *sg)
4530 {
4531 u64 addr64 = (u64) sg_dma_address(sg);
4532 unsigned int len = sg_dma_len(sg);
4533
4534 desc->Addr = cpu_to_le64(addr64);
4535 desc->Len = cpu_to_le32(len);
4536 desc->Ext = 0;
4537 }
4538
4539
4540
4541
4542
4543
4544 static int hpsa_scatter_gather(struct ctlr_info *h,
4545 struct CommandList *cp,
4546 struct scsi_cmnd *cmd)
4547 {
4548 struct scatterlist *sg;
4549 int use_sg, i, sg_limit, chained, last_sg;
4550 struct SGDescriptor *curr_sg;
4551
4552 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4553
4554 use_sg = scsi_dma_map(cmd);
4555 if (use_sg < 0)
4556 return use_sg;
4557
4558 if (!use_sg)
4559 goto sglist_finished;
4560
4561
4562
4563
4564
4565
4566
4567
4568 curr_sg = cp->SG;
4569 chained = use_sg > h->max_cmd_sg_entries;
4570 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4571 last_sg = scsi_sg_count(cmd) - 1;
4572 scsi_for_each_sg(cmd, sg, sg_limit, i) {
4573 hpsa_set_sg_descriptor(curr_sg, sg);
4574 curr_sg++;
4575 }
4576
4577 if (chained) {
4578
4579
4580
4581
4582
4583
4584 curr_sg = h->cmd_sg_list[cp->cmdindex];
4585 sg_limit = use_sg - sg_limit;
4586 for_each_sg(sg, sg, sg_limit, i) {
4587 hpsa_set_sg_descriptor(curr_sg, sg);
4588 curr_sg++;
4589 }
4590 }
4591
4592
4593 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4594
4595 if (use_sg + chained > h->maxSG)
4596 h->maxSG = use_sg + chained;
4597
4598 if (chained) {
4599 cp->Header.SGList = h->max_cmd_sg_entries;
4600 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4601 if (hpsa_map_sg_chain_block(h, cp)) {
4602 scsi_dma_unmap(cmd);
4603 return -1;
4604 }
4605 return 0;
4606 }
4607
4608 sglist_finished:
4609
4610 cp->Header.SGList = (u8) use_sg;
4611 cp->Header.SGTotal = cpu_to_le16(use_sg);
4612 return 0;
4613 }
4614
4615 static inline void warn_zero_length_transfer(struct ctlr_info *h,
4616 u8 *cdb, int cdb_len,
4617 const char *func)
4618 {
4619 dev_warn(&h->pdev->dev,
4620 "%s: Blocking zero-length request: CDB:%*phN\n",
4621 func, cdb_len, cdb);
4622 }
4623
4624 #define IO_ACCEL_INELIGIBLE 1
4625
4626 static bool is_zero_length_transfer(u8 *cdb)
4627 {
4628 u32 block_cnt;
4629
4630
4631 switch (cdb[0]) {
4632 case READ_10:
4633 case WRITE_10:
4634 case VERIFY:
4635 case WRITE_VERIFY:
4636 block_cnt = get_unaligned_be16(&cdb[7]);
4637 break;
4638 case READ_12:
4639 case WRITE_12:
4640 case VERIFY_12:
4641 case WRITE_VERIFY_12:
4642 block_cnt = get_unaligned_be32(&cdb[6]);
4643 break;
4644 case READ_16:
4645 case WRITE_16:
4646 case VERIFY_16:
4647 block_cnt = get_unaligned_be32(&cdb[10]);
4648 break;
4649 default:
4650 return false;
4651 }
4652
4653 return block_cnt == 0;
4654 }
4655
4656 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4657 {
4658 int is_write = 0;
4659 u32 block;
4660 u32 block_cnt;
4661
4662
4663 switch (cdb[0]) {
4664 case WRITE_6:
4665 case WRITE_12:
4666 is_write = 1;
4667
4668 case READ_6:
4669 case READ_12:
4670 if (*cdb_len == 6) {
4671 block = (((cdb[1] & 0x1F) << 16) |
4672 (cdb[2] << 8) |
4673 cdb[3]);
4674 block_cnt = cdb[4];
4675 if (block_cnt == 0)
4676 block_cnt = 256;
4677 } else {
4678 BUG_ON(*cdb_len != 12);
4679 block = get_unaligned_be32(&cdb[2]);
4680 block_cnt = get_unaligned_be32(&cdb[6]);
4681 }
4682 if (block_cnt > 0xffff)
4683 return IO_ACCEL_INELIGIBLE;
4684
4685 cdb[0] = is_write ? WRITE_10 : READ_10;
4686 cdb[1] = 0;
4687 cdb[2] = (u8) (block >> 24);
4688 cdb[3] = (u8) (block >> 16);
4689 cdb[4] = (u8) (block >> 8);
4690 cdb[5] = (u8) (block);
4691 cdb[6] = 0;
4692 cdb[7] = (u8) (block_cnt >> 8);
4693 cdb[8] = (u8) (block_cnt);
4694 cdb[9] = 0;
4695 *cdb_len = 10;
4696 break;
4697 }
4698 return 0;
4699 }
4700
4701 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4702 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4703 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4704 {
4705 struct scsi_cmnd *cmd = c->scsi_cmd;
4706 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4707 unsigned int len;
4708 unsigned int total_len = 0;
4709 struct scatterlist *sg;
4710 u64 addr64;
4711 int use_sg, i;
4712 struct SGDescriptor *curr_sg;
4713 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4714
4715
4716 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4717 atomic_dec(&phys_disk->ioaccel_cmds_out);
4718 return IO_ACCEL_INELIGIBLE;
4719 }
4720
4721 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4722
4723 if (is_zero_length_transfer(cdb)) {
4724 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4725 atomic_dec(&phys_disk->ioaccel_cmds_out);
4726 return IO_ACCEL_INELIGIBLE;
4727 }
4728
4729 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4730 atomic_dec(&phys_disk->ioaccel_cmds_out);
4731 return IO_ACCEL_INELIGIBLE;
4732 }
4733
4734 c->cmd_type = CMD_IOACCEL1;
4735
4736
4737 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4738 (c->cmdindex * sizeof(*cp));
4739 BUG_ON(c->busaddr & 0x0000007F);
4740
4741 use_sg = scsi_dma_map(cmd);
4742 if (use_sg < 0) {
4743 atomic_dec(&phys_disk->ioaccel_cmds_out);
4744 return use_sg;
4745 }
4746
4747 if (use_sg) {
4748 curr_sg = cp->SG;
4749 scsi_for_each_sg(cmd, sg, use_sg, i) {
4750 addr64 = (u64) sg_dma_address(sg);
4751 len = sg_dma_len(sg);
4752 total_len += len;
4753 curr_sg->Addr = cpu_to_le64(addr64);
4754 curr_sg->Len = cpu_to_le32(len);
4755 curr_sg->Ext = cpu_to_le32(0);
4756 curr_sg++;
4757 }
4758 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4759
4760 switch (cmd->sc_data_direction) {
4761 case DMA_TO_DEVICE:
4762 control |= IOACCEL1_CONTROL_DATA_OUT;
4763 break;
4764 case DMA_FROM_DEVICE:
4765 control |= IOACCEL1_CONTROL_DATA_IN;
4766 break;
4767 case DMA_NONE:
4768 control |= IOACCEL1_CONTROL_NODATAXFER;
4769 break;
4770 default:
4771 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4772 cmd->sc_data_direction);
4773 BUG();
4774 break;
4775 }
4776 } else {
4777 control |= IOACCEL1_CONTROL_NODATAXFER;
4778 }
4779
4780 c->Header.SGList = use_sg;
4781
4782 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4783 cp->transfer_len = cpu_to_le32(total_len);
4784 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4785 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4786 cp->control = cpu_to_le32(control);
4787 memcpy(cp->CDB, cdb, cdb_len);
4788 memcpy(cp->CISS_LUN, scsi3addr, 8);
4789
4790 enqueue_cmd_and_start_io(h, c);
4791 return 0;
4792 }
4793
4794
4795
4796
4797
4798 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4799 struct CommandList *c)
4800 {
4801 struct scsi_cmnd *cmd = c->scsi_cmd;
4802 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4803
4804 if (!dev)
4805 return -1;
4806
4807 c->phys_disk = dev;
4808
4809 if (dev->in_reset)
4810 return -1;
4811
4812 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4813 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4814 }
4815
4816
4817
4818
4819 static void set_encrypt_ioaccel2(struct ctlr_info *h,
4820 struct CommandList *c, struct io_accel2_cmd *cp)
4821 {
4822 struct scsi_cmnd *cmd = c->scsi_cmd;
4823 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4824 struct raid_map_data *map = &dev->raid_map;
4825 u64 first_block;
4826
4827
4828 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4829 return;
4830
4831 cp->dekindex = map->dekindex;
4832
4833
4834 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4835
4836
4837
4838
4839
4840 switch (cmd->cmnd[0]) {
4841
4842 case READ_6:
4843 case WRITE_6:
4844 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4845 (cmd->cmnd[2] << 8) |
4846 cmd->cmnd[3]);
4847 break;
4848 case WRITE_10:
4849 case READ_10:
4850
4851 case WRITE_12:
4852 case READ_12:
4853 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4854 break;
4855 case WRITE_16:
4856 case READ_16:
4857 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4858 break;
4859 default:
4860 dev_err(&h->pdev->dev,
4861 "ERROR: %s: size (0x%x) not supported for encryption\n",
4862 __func__, cmd->cmnd[0]);
4863 BUG();
4864 break;
4865 }
4866
4867 if (le32_to_cpu(map->volume_blk_size) != 512)
4868 first_block = first_block *
4869 le32_to_cpu(map->volume_blk_size)/512;
4870
4871 cp->tweak_lower = cpu_to_le32(first_block);
4872 cp->tweak_upper = cpu_to_le32(first_block >> 32);
4873 }
4874
4875 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4876 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4877 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4878 {
4879 struct scsi_cmnd *cmd = c->scsi_cmd;
4880 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4881 struct ioaccel2_sg_element *curr_sg;
4882 int use_sg, i;
4883 struct scatterlist *sg;
4884 u64 addr64;
4885 u32 len;
4886 u32 total_len = 0;
4887
4888 if (!cmd->device)
4889 return -1;
4890
4891 if (!cmd->device->hostdata)
4892 return -1;
4893
4894 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4895
4896 if (is_zero_length_transfer(cdb)) {
4897 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4898 atomic_dec(&phys_disk->ioaccel_cmds_out);
4899 return IO_ACCEL_INELIGIBLE;
4900 }
4901
4902 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4903 atomic_dec(&phys_disk->ioaccel_cmds_out);
4904 return IO_ACCEL_INELIGIBLE;
4905 }
4906
4907 c->cmd_type = CMD_IOACCEL2;
4908
4909 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4910 (c->cmdindex * sizeof(*cp));
4911 BUG_ON(c->busaddr & 0x0000007F);
4912
4913 memset(cp, 0, sizeof(*cp));
4914 cp->IU_type = IOACCEL2_IU_TYPE;
4915
4916 use_sg = scsi_dma_map(cmd);
4917 if (use_sg < 0) {
4918 atomic_dec(&phys_disk->ioaccel_cmds_out);
4919 return use_sg;
4920 }
4921
4922 if (use_sg) {
4923 curr_sg = cp->sg;
4924 if (use_sg > h->ioaccel_maxsg) {
4925 addr64 = le64_to_cpu(
4926 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4927 curr_sg->address = cpu_to_le64(addr64);
4928 curr_sg->length = 0;
4929 curr_sg->reserved[0] = 0;
4930 curr_sg->reserved[1] = 0;
4931 curr_sg->reserved[2] = 0;
4932 curr_sg->chain_indicator = IOACCEL2_CHAIN;
4933
4934 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4935 }
4936 scsi_for_each_sg(cmd, sg, use_sg, i) {
4937 addr64 = (u64) sg_dma_address(sg);
4938 len = sg_dma_len(sg);
4939 total_len += len;
4940 curr_sg->address = cpu_to_le64(addr64);
4941 curr_sg->length = cpu_to_le32(len);
4942 curr_sg->reserved[0] = 0;
4943 curr_sg->reserved[1] = 0;
4944 curr_sg->reserved[2] = 0;
4945 curr_sg->chain_indicator = 0;
4946 curr_sg++;
4947 }
4948
4949
4950
4951
4952 (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
4953
4954 switch (cmd->sc_data_direction) {
4955 case DMA_TO_DEVICE:
4956 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4957 cp->direction |= IOACCEL2_DIR_DATA_OUT;
4958 break;
4959 case DMA_FROM_DEVICE:
4960 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4961 cp->direction |= IOACCEL2_DIR_DATA_IN;
4962 break;
4963 case DMA_NONE:
4964 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4965 cp->direction |= IOACCEL2_DIR_NO_DATA;
4966 break;
4967 default:
4968 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4969 cmd->sc_data_direction);
4970 BUG();
4971 break;
4972 }
4973 } else {
4974 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4975 cp->direction |= IOACCEL2_DIR_NO_DATA;
4976 }
4977
4978
4979 set_encrypt_ioaccel2(h, c, cp);
4980
4981 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4982 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4983 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4984
4985 cp->data_len = cpu_to_le32(total_len);
4986 cp->err_ptr = cpu_to_le64(c->busaddr +
4987 offsetof(struct io_accel2_cmd, error_data));
4988 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4989
4990
4991 if (use_sg > h->ioaccel_maxsg) {
4992 cp->sg_count = 1;
4993 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
4994 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4995 atomic_dec(&phys_disk->ioaccel_cmds_out);
4996 scsi_dma_unmap(cmd);
4997 return -1;
4998 }
4999 } else
5000 cp->sg_count = (u8) use_sg;
5001
5002 if (phys_disk->in_reset) {
5003 cmd->result = DID_RESET << 16;
5004 return -1;
5005 }
5006
5007 enqueue_cmd_and_start_io(h, c);
5008 return 0;
5009 }
5010
5011
5012
5013
5014 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
5015 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
5016 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
5017 {
5018 if (!c->scsi_cmd->device)
5019 return -1;
5020
5021 if (!c->scsi_cmd->device->hostdata)
5022 return -1;
5023
5024 if (phys_disk->in_reset)
5025 return -1;
5026
5027
5028 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
5029 phys_disk->queue_depth) {
5030 atomic_dec(&phys_disk->ioaccel_cmds_out);
5031 return IO_ACCEL_INELIGIBLE;
5032 }
5033 if (h->transMethod & CFGTBL_Trans_io_accel1)
5034 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
5035 cdb, cdb_len, scsi3addr,
5036 phys_disk);
5037 else
5038 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
5039 cdb, cdb_len, scsi3addr,
5040 phys_disk);
5041 }
5042
5043 static void raid_map_helper(struct raid_map_data *map,
5044 int offload_to_mirror, u32 *map_index, u32 *current_group)
5045 {
5046 if (offload_to_mirror == 0) {
5047
5048 *map_index %= le16_to_cpu(map->data_disks_per_row);
5049 return;
5050 }
5051 do {
5052
5053 *current_group = *map_index /
5054 le16_to_cpu(map->data_disks_per_row);
5055 if (offload_to_mirror == *current_group)
5056 continue;
5057 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
5058
5059 *map_index += le16_to_cpu(map->data_disks_per_row);
5060 (*current_group)++;
5061 } else {
5062
5063 *map_index %= le16_to_cpu(map->data_disks_per_row);
5064 *current_group = 0;
5065 }
5066 } while (offload_to_mirror != *current_group);
5067 }
5068
5069
5070
5071
5072 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5073 struct CommandList *c)
5074 {
5075 struct scsi_cmnd *cmd = c->scsi_cmd;
5076 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5077 struct raid_map_data *map = &dev->raid_map;
5078 struct raid_map_disk_data *dd = &map->data[0];
5079 int is_write = 0;
5080 u32 map_index;
5081 u64 first_block, last_block;
5082 u32 block_cnt;
5083 u32 blocks_per_row;
5084 u64 first_row, last_row;
5085 u32 first_row_offset, last_row_offset;
5086 u32 first_column, last_column;
5087 u64 r0_first_row, r0_last_row;
5088 u32 r5or6_blocks_per_row;
5089 u64 r5or6_first_row, r5or6_last_row;
5090 u32 r5or6_first_row_offset, r5or6_last_row_offset;
5091 u32 r5or6_first_column, r5or6_last_column;
5092 u32 total_disks_per_row;
5093 u32 stripesize;
5094 u32 first_group, last_group, current_group;
5095 u32 map_row;
5096 u32 disk_handle;
5097 u64 disk_block;
5098 u32 disk_block_cnt;
5099 u8 cdb[16];
5100 u8 cdb_len;
5101 u16 strip_size;
5102 #if BITS_PER_LONG == 32
5103 u64 tmpdiv;
5104 #endif
5105 int offload_to_mirror;
5106
5107 if (!dev)
5108 return -1;
5109
5110 if (dev->in_reset)
5111 return -1;
5112
5113
5114 switch (cmd->cmnd[0]) {
5115 case WRITE_6:
5116 is_write = 1;
5117
5118 case READ_6:
5119 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
5120 (cmd->cmnd[2] << 8) |
5121 cmd->cmnd[3]);
5122 block_cnt = cmd->cmnd[4];
5123 if (block_cnt == 0)
5124 block_cnt = 256;
5125 break;
5126 case WRITE_10:
5127 is_write = 1;
5128
5129 case READ_10:
5130 first_block =
5131 (((u64) cmd->cmnd[2]) << 24) |
5132 (((u64) cmd->cmnd[3]) << 16) |
5133 (((u64) cmd->cmnd[4]) << 8) |
5134 cmd->cmnd[5];
5135 block_cnt =
5136 (((u32) cmd->cmnd[7]) << 8) |
5137 cmd->cmnd[8];
5138 break;
5139 case WRITE_12:
5140 is_write = 1;
5141
5142 case READ_12:
5143 first_block =
5144 (((u64) cmd->cmnd[2]) << 24) |
5145 (((u64) cmd->cmnd[3]) << 16) |
5146 (((u64) cmd->cmnd[4]) << 8) |
5147 cmd->cmnd[5];
5148 block_cnt =
5149 (((u32) cmd->cmnd[6]) << 24) |
5150 (((u32) cmd->cmnd[7]) << 16) |
5151 (((u32) cmd->cmnd[8]) << 8) |
5152 cmd->cmnd[9];
5153 break;
5154 case WRITE_16:
5155 is_write = 1;
5156
5157 case READ_16:
5158 first_block =
5159 (((u64) cmd->cmnd[2]) << 56) |
5160 (((u64) cmd->cmnd[3]) << 48) |
5161 (((u64) cmd->cmnd[4]) << 40) |
5162 (((u64) cmd->cmnd[5]) << 32) |
5163 (((u64) cmd->cmnd[6]) << 24) |
5164 (((u64) cmd->cmnd[7]) << 16) |
5165 (((u64) cmd->cmnd[8]) << 8) |
5166 cmd->cmnd[9];
5167 block_cnt =
5168 (((u32) cmd->cmnd[10]) << 24) |
5169 (((u32) cmd->cmnd[11]) << 16) |
5170 (((u32) cmd->cmnd[12]) << 8) |
5171 cmd->cmnd[13];
5172 break;
5173 default:
5174 return IO_ACCEL_INELIGIBLE;
5175 }
5176 last_block = first_block + block_cnt - 1;
5177
5178
5179 if (is_write && dev->raid_level != 0)
5180 return IO_ACCEL_INELIGIBLE;
5181
5182
5183 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5184 last_block < first_block)
5185 return IO_ACCEL_INELIGIBLE;
5186
5187
5188 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5189 le16_to_cpu(map->strip_size);
5190 strip_size = le16_to_cpu(map->strip_size);
5191 #if BITS_PER_LONG == 32
5192 tmpdiv = first_block;
5193 (void) do_div(tmpdiv, blocks_per_row);
5194 first_row = tmpdiv;
5195 tmpdiv = last_block;
5196 (void) do_div(tmpdiv, blocks_per_row);
5197 last_row = tmpdiv;
5198 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5199 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5200 tmpdiv = first_row_offset;
5201 (void) do_div(tmpdiv, strip_size);
5202 first_column = tmpdiv;
5203 tmpdiv = last_row_offset;
5204 (void) do_div(tmpdiv, strip_size);
5205 last_column = tmpdiv;
5206 #else
5207 first_row = first_block / blocks_per_row;
5208 last_row = last_block / blocks_per_row;
5209 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5210 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5211 first_column = first_row_offset / strip_size;
5212 last_column = last_row_offset / strip_size;
5213 #endif
5214
5215
5216 if ((first_row != last_row) || (first_column != last_column))
5217 return IO_ACCEL_INELIGIBLE;
5218
5219
5220 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5221 le16_to_cpu(map->metadata_disks_per_row);
5222 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5223 le16_to_cpu(map->row_cnt);
5224 map_index = (map_row * total_disks_per_row) + first_column;
5225
5226 switch (dev->raid_level) {
5227 case HPSA_RAID_0:
5228 break;
5229 case HPSA_RAID_1:
5230
5231
5232
5233
5234 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
5235 if (dev->offload_to_mirror)
5236 map_index += le16_to_cpu(map->data_disks_per_row);
5237 dev->offload_to_mirror = !dev->offload_to_mirror;
5238 break;
5239 case HPSA_RAID_ADM:
5240
5241
5242
5243 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
5244
5245 offload_to_mirror = dev->offload_to_mirror;
5246 raid_map_helper(map, offload_to_mirror,
5247 &map_index, ¤t_group);
5248
5249 offload_to_mirror =
5250 (offload_to_mirror >=
5251 le16_to_cpu(map->layout_map_count) - 1)
5252 ? 0 : offload_to_mirror + 1;
5253 dev->offload_to_mirror = offload_to_mirror;
5254
5255
5256
5257
5258 break;
5259 case HPSA_RAID_5:
5260 case HPSA_RAID_6:
5261 if (le16_to_cpu(map->layout_map_count) <= 1)
5262 break;
5263
5264
5265 r5or6_blocks_per_row =
5266 le16_to_cpu(map->strip_size) *
5267 le16_to_cpu(map->data_disks_per_row);
5268 BUG_ON(r5or6_blocks_per_row == 0);
5269 stripesize = r5or6_blocks_per_row *
5270 le16_to_cpu(map->layout_map_count);
5271 #if BITS_PER_LONG == 32
5272 tmpdiv = first_block;
5273 first_group = do_div(tmpdiv, stripesize);
5274 tmpdiv = first_group;
5275 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5276 first_group = tmpdiv;
5277 tmpdiv = last_block;
5278 last_group = do_div(tmpdiv, stripesize);
5279 tmpdiv = last_group;
5280 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5281 last_group = tmpdiv;
5282 #else
5283 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5284 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5285 #endif
5286 if (first_group != last_group)
5287 return IO_ACCEL_INELIGIBLE;
5288
5289
5290 #if BITS_PER_LONG == 32
5291 tmpdiv = first_block;
5292 (void) do_div(tmpdiv, stripesize);
5293 first_row = r5or6_first_row = r0_first_row = tmpdiv;
5294 tmpdiv = last_block;
5295 (void) do_div(tmpdiv, stripesize);
5296 r5or6_last_row = r0_last_row = tmpdiv;
5297 #else
5298 first_row = r5or6_first_row = r0_first_row =
5299 first_block / stripesize;
5300 r5or6_last_row = r0_last_row = last_block / stripesize;
5301 #endif
5302 if (r5or6_first_row != r5or6_last_row)
5303 return IO_ACCEL_INELIGIBLE;
5304
5305
5306
5307 #if BITS_PER_LONG == 32
5308 tmpdiv = first_block;
5309 first_row_offset = do_div(tmpdiv, stripesize);
5310 tmpdiv = first_row_offset;
5311 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5312 r5or6_first_row_offset = first_row_offset;
5313 tmpdiv = last_block;
5314 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5315 tmpdiv = r5or6_last_row_offset;
5316 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5317 tmpdiv = r5or6_first_row_offset;
5318 (void) do_div(tmpdiv, map->strip_size);
5319 first_column = r5or6_first_column = tmpdiv;
5320 tmpdiv = r5or6_last_row_offset;
5321 (void) do_div(tmpdiv, map->strip_size);
5322 r5or6_last_column = tmpdiv;
5323 #else
5324 first_row_offset = r5or6_first_row_offset =
5325 (u32)((first_block % stripesize) %
5326 r5or6_blocks_per_row);
5327
5328 r5or6_last_row_offset =
5329 (u32)((last_block % stripesize) %
5330 r5or6_blocks_per_row);
5331
5332 first_column = r5or6_first_column =
5333 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5334 r5or6_last_column =
5335 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5336 #endif
5337 if (r5or6_first_column != r5or6_last_column)
5338 return IO_ACCEL_INELIGIBLE;
5339
5340
5341 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5342 le16_to_cpu(map->row_cnt);
5343
5344 map_index = (first_group *
5345 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5346 (map_row * total_disks_per_row) + first_column;
5347 break;
5348 default:
5349 return IO_ACCEL_INELIGIBLE;
5350 }
5351
5352 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5353 return IO_ACCEL_INELIGIBLE;
5354
5355 c->phys_disk = dev->phys_disk[map_index];
5356 if (!c->phys_disk)
5357 return IO_ACCEL_INELIGIBLE;
5358
5359 disk_handle = dd[map_index].ioaccel_handle;
5360 disk_block = le64_to_cpu(map->disk_starting_blk) +
5361 first_row * le16_to_cpu(map->strip_size) +
5362 (first_row_offset - first_column *
5363 le16_to_cpu(map->strip_size));
5364 disk_block_cnt = block_cnt;
5365
5366
5367 if (map->phys_blk_shift) {
5368 disk_block <<= map->phys_blk_shift;
5369 disk_block_cnt <<= map->phys_blk_shift;
5370 }
5371 BUG_ON(disk_block_cnt > 0xffff);
5372
5373
5374 if (disk_block > 0xffffffff) {
5375 cdb[0] = is_write ? WRITE_16 : READ_16;
5376 cdb[1] = 0;
5377 cdb[2] = (u8) (disk_block >> 56);
5378 cdb[3] = (u8) (disk_block >> 48);
5379 cdb[4] = (u8) (disk_block >> 40);
5380 cdb[5] = (u8) (disk_block >> 32);
5381 cdb[6] = (u8) (disk_block >> 24);
5382 cdb[7] = (u8) (disk_block >> 16);
5383 cdb[8] = (u8) (disk_block >> 8);
5384 cdb[9] = (u8) (disk_block);
5385 cdb[10] = (u8) (disk_block_cnt >> 24);
5386 cdb[11] = (u8) (disk_block_cnt >> 16);
5387 cdb[12] = (u8) (disk_block_cnt >> 8);
5388 cdb[13] = (u8) (disk_block_cnt);
5389 cdb[14] = 0;
5390 cdb[15] = 0;
5391 cdb_len = 16;
5392 } else {
5393 cdb[0] = is_write ? WRITE_10 : READ_10;
5394 cdb[1] = 0;
5395 cdb[2] = (u8) (disk_block >> 24);
5396 cdb[3] = (u8) (disk_block >> 16);
5397 cdb[4] = (u8) (disk_block >> 8);
5398 cdb[5] = (u8) (disk_block);
5399 cdb[6] = 0;
5400 cdb[7] = (u8) (disk_block_cnt >> 8);
5401 cdb[8] = (u8) (disk_block_cnt);
5402 cdb[9] = 0;
5403 cdb_len = 10;
5404 }
5405 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5406 dev->scsi3addr,
5407 dev->phys_disk[map_index]);
5408 }
5409
5410
5411
5412
5413
5414
5415 static int hpsa_ciss_submit(struct ctlr_info *h,
5416 struct CommandList *c, struct scsi_cmnd *cmd,
5417 struct hpsa_scsi_dev_t *dev)
5418 {
5419 cmd->host_scribble = (unsigned char *) c;
5420 c->cmd_type = CMD_SCSI;
5421 c->scsi_cmd = cmd;
5422 c->Header.ReplyQueue = 0;
5423 memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
5424 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5425
5426
5427
5428 c->Request.Timeout = 0;
5429 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5430 c->Request.CDBLen = cmd->cmd_len;
5431 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5432 switch (cmd->sc_data_direction) {
5433 case DMA_TO_DEVICE:
5434 c->Request.type_attr_dir =
5435 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5436 break;
5437 case DMA_FROM_DEVICE:
5438 c->Request.type_attr_dir =
5439 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5440 break;
5441 case DMA_NONE:
5442 c->Request.type_attr_dir =
5443 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5444 break;
5445 case DMA_BIDIRECTIONAL:
5446
5447
5448
5449
5450
5451 c->Request.type_attr_dir =
5452 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5453
5454
5455
5456
5457
5458
5459
5460
5461 break;
5462
5463 default:
5464 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5465 cmd->sc_data_direction);
5466 BUG();
5467 break;
5468 }
5469
5470 if (hpsa_scatter_gather(h, c, cmd) < 0) {
5471 hpsa_cmd_resolve_and_free(h, c);
5472 return SCSI_MLQUEUE_HOST_BUSY;
5473 }
5474
5475 if (dev->in_reset) {
5476 hpsa_cmd_resolve_and_free(h, c);
5477 return SCSI_MLQUEUE_HOST_BUSY;
5478 }
5479
5480 c->device = dev;
5481
5482 enqueue_cmd_and_start_io(h, c);
5483
5484 return 0;
5485 }
5486
5487 static void hpsa_cmd_init(struct ctlr_info *h, int index,
5488 struct CommandList *c)
5489 {
5490 dma_addr_t cmd_dma_handle, err_dma_handle;
5491
5492
5493 memset(c, 0, offsetof(struct CommandList, refcount));
5494 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5495 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5496 c->err_info = h->errinfo_pool + index;
5497 memset(c->err_info, 0, sizeof(*c->err_info));
5498 err_dma_handle = h->errinfo_pool_dhandle
5499 + index * sizeof(*c->err_info);
5500 c->cmdindex = index;
5501 c->busaddr = (u32) cmd_dma_handle;
5502 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5503 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5504 c->h = h;
5505 c->scsi_cmd = SCSI_CMD_IDLE;
5506 }
5507
5508 static void hpsa_preinitialize_commands(struct ctlr_info *h)
5509 {
5510 int i;
5511
5512 for (i = 0; i < h->nr_cmds; i++) {
5513 struct CommandList *c = h->cmd_pool + i;
5514
5515 hpsa_cmd_init(h, i, c);
5516 atomic_set(&c->refcount, 0);
5517 }
5518 }
5519
5520 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5521 struct CommandList *c)
5522 {
5523 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5524
5525 BUG_ON(c->cmdindex != index);
5526
5527 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5528 memset(c->err_info, 0, sizeof(*c->err_info));
5529 c->busaddr = (u32) cmd_dma_handle;
5530 }
5531
5532 static int hpsa_ioaccel_submit(struct ctlr_info *h,
5533 struct CommandList *c, struct scsi_cmnd *cmd)
5534 {
5535 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5536 int rc = IO_ACCEL_INELIGIBLE;
5537
5538 if (!dev)
5539 return SCSI_MLQUEUE_HOST_BUSY;
5540
5541 if (dev->in_reset)
5542 return SCSI_MLQUEUE_HOST_BUSY;
5543
5544 if (hpsa_simple_mode)
5545 return IO_ACCEL_INELIGIBLE;
5546
5547 cmd->host_scribble = (unsigned char *) c;
5548
5549 if (dev->offload_enabled) {
5550 hpsa_cmd_init(h, c->cmdindex, c);
5551 c->cmd_type = CMD_SCSI;
5552 c->scsi_cmd = cmd;
5553 c->device = dev;
5554 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5555 if (rc < 0)
5556 rc = SCSI_MLQUEUE_HOST_BUSY;
5557 } else if (dev->hba_ioaccel_enabled) {
5558 hpsa_cmd_init(h, c->cmdindex, c);
5559 c->cmd_type = CMD_SCSI;
5560 c->scsi_cmd = cmd;
5561 c->device = dev;
5562 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5563 if (rc < 0)
5564 rc = SCSI_MLQUEUE_HOST_BUSY;
5565 }
5566 return rc;
5567 }
5568
5569 static void hpsa_command_resubmit_worker(struct work_struct *work)
5570 {
5571 struct scsi_cmnd *cmd;
5572 struct hpsa_scsi_dev_t *dev;
5573 struct CommandList *c = container_of(work, struct CommandList, work);
5574
5575 cmd = c->scsi_cmd;
5576 dev = cmd->device->hostdata;
5577 if (!dev) {
5578 cmd->result = DID_NO_CONNECT << 16;
5579 return hpsa_cmd_free_and_done(c->h, c, cmd);
5580 }
5581
5582 if (dev->in_reset) {
5583 cmd->result = DID_RESET << 16;
5584 return hpsa_cmd_free_and_done(c->h, c, cmd);
5585 }
5586
5587 if (c->cmd_type == CMD_IOACCEL2) {
5588 struct ctlr_info *h = c->h;
5589 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5590 int rc;
5591
5592 if (c2->error_data.serv_response ==
5593 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5594 rc = hpsa_ioaccel_submit(h, c, cmd);
5595 if (rc == 0)
5596 return;
5597 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5598
5599
5600
5601
5602
5603 cmd->result = DID_IMM_RETRY << 16;
5604 return hpsa_cmd_free_and_done(h, c, cmd);
5605 }
5606
5607 }
5608 }
5609 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5610 if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
5611
5612
5613
5614
5615
5616
5617
5618
5619 cmd->result = DID_IMM_RETRY << 16;
5620 cmd->scsi_done(cmd);
5621 }
5622 }
5623
5624
5625 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5626 {
5627 struct ctlr_info *h;
5628 struct hpsa_scsi_dev_t *dev;
5629 struct CommandList *c;
5630 int rc = 0;
5631
5632
5633 h = sdev_to_hba(cmd->device);
5634
5635 BUG_ON(cmd->request->tag < 0);
5636
5637 dev = cmd->device->hostdata;
5638 if (!dev) {
5639 cmd->result = DID_NO_CONNECT << 16;
5640 cmd->scsi_done(cmd);
5641 return 0;
5642 }
5643
5644 if (dev->removed) {
5645 cmd->result = DID_NO_CONNECT << 16;
5646 cmd->scsi_done(cmd);
5647 return 0;
5648 }
5649
5650 if (unlikely(lockup_detected(h))) {
5651 cmd->result = DID_NO_CONNECT << 16;
5652 cmd->scsi_done(cmd);
5653 return 0;
5654 }
5655
5656 if (dev->in_reset)
5657 return SCSI_MLQUEUE_DEVICE_BUSY;
5658
5659 c = cmd_tagged_alloc(h, cmd);
5660 if (c == NULL)
5661 return SCSI_MLQUEUE_DEVICE_BUSY;
5662
5663
5664
5665
5666
5667 cmd->result = 0;
5668
5669
5670
5671
5672
5673 if (likely(cmd->retries == 0 &&
5674 !blk_rq_is_passthrough(cmd->request) &&
5675 h->acciopath_status)) {
5676 rc = hpsa_ioaccel_submit(h, c, cmd);
5677 if (rc == 0)
5678 return 0;
5679 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5680 hpsa_cmd_resolve_and_free(h, c);
5681 return SCSI_MLQUEUE_HOST_BUSY;
5682 }
5683 }
5684 return hpsa_ciss_submit(h, c, cmd, dev);
5685 }
5686
5687 static void hpsa_scan_complete(struct ctlr_info *h)
5688 {
5689 unsigned long flags;
5690
5691 spin_lock_irqsave(&h->scan_lock, flags);
5692 h->scan_finished = 1;
5693 wake_up(&h->scan_wait_queue);
5694 spin_unlock_irqrestore(&h->scan_lock, flags);
5695 }
5696
5697 static void hpsa_scan_start(struct Scsi_Host *sh)
5698 {
5699 struct ctlr_info *h = shost_to_hba(sh);
5700 unsigned long flags;
5701
5702
5703
5704
5705
5706
5707
5708 if (unlikely(lockup_detected(h)))
5709 return hpsa_scan_complete(h);
5710
5711
5712
5713
5714 spin_lock_irqsave(&h->scan_lock, flags);
5715 if (h->scan_waiting) {
5716 spin_unlock_irqrestore(&h->scan_lock, flags);
5717 return;
5718 }
5719
5720 spin_unlock_irqrestore(&h->scan_lock, flags);
5721
5722
5723 while (1) {
5724 spin_lock_irqsave(&h->scan_lock, flags);
5725 if (h->scan_finished)
5726 break;
5727 h->scan_waiting = 1;
5728 spin_unlock_irqrestore(&h->scan_lock, flags);
5729 wait_event(h->scan_wait_queue, h->scan_finished);
5730
5731
5732
5733
5734
5735 }
5736 h->scan_finished = 0;
5737 h->scan_waiting = 0;
5738 spin_unlock_irqrestore(&h->scan_lock, flags);
5739
5740 if (unlikely(lockup_detected(h)))
5741 return hpsa_scan_complete(h);
5742
5743
5744
5745
5746 spin_lock_irqsave(&h->reset_lock, flags);
5747 if (h->reset_in_progress) {
5748 h->drv_req_rescan = 1;
5749 spin_unlock_irqrestore(&h->reset_lock, flags);
5750 hpsa_scan_complete(h);
5751 return;
5752 }
5753 spin_unlock_irqrestore(&h->reset_lock, flags);
5754
5755 hpsa_update_scsi_devices(h);
5756
5757 hpsa_scan_complete(h);
5758 }
5759
5760 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5761 {
5762 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5763
5764 if (!logical_drive)
5765 return -ENODEV;
5766
5767 if (qdepth < 1)
5768 qdepth = 1;
5769 else if (qdepth > logical_drive->queue_depth)
5770 qdepth = logical_drive->queue_depth;
5771
5772 return scsi_change_queue_depth(sdev, qdepth);
5773 }
5774
5775 static int hpsa_scan_finished(struct Scsi_Host *sh,
5776 unsigned long elapsed_time)
5777 {
5778 struct ctlr_info *h = shost_to_hba(sh);
5779 unsigned long flags;
5780 int finished;
5781
5782 spin_lock_irqsave(&h->scan_lock, flags);
5783 finished = h->scan_finished;
5784 spin_unlock_irqrestore(&h->scan_lock, flags);
5785 return finished;
5786 }
5787
5788 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5789 {
5790 struct Scsi_Host *sh;
5791
5792 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5793 if (sh == NULL) {
5794 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5795 return -ENOMEM;
5796 }
5797
5798 sh->io_port = 0;
5799 sh->n_io_port = 0;
5800 sh->this_id = -1;
5801 sh->max_channel = 3;
5802 sh->max_cmd_len = MAX_COMMAND_SIZE;
5803 sh->max_lun = HPSA_MAX_LUN;
5804 sh->max_id = HPSA_MAX_LUN;
5805 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5806 sh->cmd_per_lun = sh->can_queue;
5807 sh->sg_tablesize = h->maxsgentries;
5808 sh->transportt = hpsa_sas_transport_template;
5809 sh->hostdata[0] = (unsigned long) h;
5810 sh->irq = pci_irq_vector(h->pdev, 0);
5811 sh->unique_id = sh->irq;
5812
5813 h->scsi_host = sh;
5814 return 0;
5815 }
5816
5817 static int hpsa_scsi_add_host(struct ctlr_info *h)
5818 {
5819 int rv;
5820
5821 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5822 if (rv) {
5823 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5824 return rv;
5825 }
5826 scsi_scan_host(h->scsi_host);
5827 return 0;
5828 }
5829
5830
5831
5832
5833
5834
5835
5836 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5837 {
5838 int idx = scmd->request->tag;
5839
5840 if (idx < 0)
5841 return idx;
5842
5843
5844 return idx += HPSA_NRESERVED_CMDS;
5845 }
5846
5847
5848
5849
5850
5851 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5852 struct CommandList *c, unsigned char lunaddr[],
5853 int reply_queue)
5854 {
5855 int rc;
5856
5857
5858 (void) fill_cmd(c, TEST_UNIT_READY, h,
5859 NULL, 0, 0, lunaddr, TYPE_CMD);
5860 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5861 if (rc)
5862 return rc;
5863
5864
5865
5866 if (c->err_info->CommandStatus == CMD_SUCCESS)
5867 return 0;
5868
5869
5870
5871
5872
5873
5874 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5875 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5876 (c->err_info->SenseInfo[2] == NO_SENSE ||
5877 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5878 return 0;
5879
5880 return 1;
5881 }
5882
5883
5884
5885
5886
5887 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5888 struct CommandList *c,
5889 unsigned char lunaddr[], int reply_queue)
5890 {
5891 int rc;
5892 int count = 0;
5893 int waittime = 1;
5894
5895
5896 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5897
5898
5899
5900
5901
5902 msleep(1000 * waittime);
5903
5904 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5905 if (!rc)
5906 break;
5907
5908
5909 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5910 waittime *= 2;
5911
5912 dev_warn(&h->pdev->dev,
5913 "waiting %d secs for device to become ready.\n",
5914 waittime);
5915 }
5916
5917 return rc;
5918 }
5919
5920 static int wait_for_device_to_become_ready(struct ctlr_info *h,
5921 unsigned char lunaddr[],
5922 int reply_queue)
5923 {
5924 int first_queue;
5925 int last_queue;
5926 int rq;
5927 int rc = 0;
5928 struct CommandList *c;
5929
5930 c = cmd_alloc(h);
5931
5932
5933
5934
5935
5936
5937 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5938 first_queue = 0;
5939 last_queue = h->nreply_queues - 1;
5940 } else {
5941 first_queue = reply_queue;
5942 last_queue = reply_queue;
5943 }
5944
5945 for (rq = first_queue; rq <= last_queue; rq++) {
5946 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5947 if (rc)
5948 break;
5949 }
5950
5951 if (rc)
5952 dev_warn(&h->pdev->dev, "giving up on device.\n");
5953 else
5954 dev_warn(&h->pdev->dev, "device is ready.\n");
5955
5956 cmd_free(h, c);
5957 return rc;
5958 }
5959
5960
5961
5962
5963 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5964 {
5965 int rc = SUCCESS;
5966 int i;
5967 struct ctlr_info *h;
5968 struct hpsa_scsi_dev_t *dev = NULL;
5969 u8 reset_type;
5970 char msg[48];
5971 unsigned long flags;
5972
5973
5974 h = sdev_to_hba(scsicmd->device);
5975 if (h == NULL)
5976 return FAILED;
5977
5978 spin_lock_irqsave(&h->reset_lock, flags);
5979 h->reset_in_progress = 1;
5980 spin_unlock_irqrestore(&h->reset_lock, flags);
5981
5982 if (lockup_detected(h)) {
5983 rc = FAILED;
5984 goto return_reset_status;
5985 }
5986
5987 dev = scsicmd->device->hostdata;
5988 if (!dev) {
5989 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5990 rc = FAILED;
5991 goto return_reset_status;
5992 }
5993
5994 if (dev->devtype == TYPE_ENCLOSURE) {
5995 rc = SUCCESS;
5996 goto return_reset_status;
5997 }
5998
5999
6000 if (lockup_detected(h)) {
6001 snprintf(msg, sizeof(msg),
6002 "cmd %d RESET FAILED, lockup detected",
6003 hpsa_get_cmd_index(scsicmd));
6004 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6005 rc = FAILED;
6006 goto return_reset_status;
6007 }
6008
6009
6010 if (detect_controller_lockup(h)) {
6011 snprintf(msg, sizeof(msg),
6012 "cmd %d RESET FAILED, new lockup detected",
6013 hpsa_get_cmd_index(scsicmd));
6014 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6015 rc = FAILED;
6016 goto return_reset_status;
6017 }
6018
6019
6020 if (is_hba_lunid(dev->scsi3addr)) {
6021 rc = SUCCESS;
6022 goto return_reset_status;
6023 }
6024
6025 if (is_logical_dev_addr_mode(dev->scsi3addr))
6026 reset_type = HPSA_DEVICE_RESET_MSG;
6027 else
6028 reset_type = HPSA_PHYS_TARGET_RESET;
6029
6030 sprintf(msg, "resetting %s",
6031 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
6032 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6033
6034
6035
6036
6037 dev->in_reset = true;
6038 for (i = 0; i < 10; i++) {
6039 if (atomic_read(&dev->commands_outstanding) > 0)
6040 msleep(1000);
6041 else
6042 break;
6043 }
6044
6045
6046 rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
6047 if (rc == 0)
6048 rc = SUCCESS;
6049 else
6050 rc = FAILED;
6051
6052 sprintf(msg, "reset %s %s",
6053 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
6054 rc == SUCCESS ? "completed successfully" : "failed");
6055 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6056
6057 return_reset_status:
6058 spin_lock_irqsave(&h->reset_lock, flags);
6059 h->reset_in_progress = 0;
6060 if (dev)
6061 dev->in_reset = false;
6062 spin_unlock_irqrestore(&h->reset_lock, flags);
6063 return rc;
6064 }
6065
6066
6067
6068
6069
6070
6071
6072 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6073 struct scsi_cmnd *scmd)
6074 {
6075 int idx = hpsa_get_cmd_index(scmd);
6076 struct CommandList *c = h->cmd_pool + idx;
6077
6078 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6079 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6080 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
6081
6082
6083
6084 BUG();
6085 }
6086
6087 if (unlikely(!hpsa_is_cmd_idle(c))) {
6088
6089
6090
6091
6092
6093
6094 if (idx != h->last_collision_tag) {
6095 dev_warn(&h->pdev->dev,
6096 "%s: tag collision (tag=%d)\n", __func__, idx);
6097 if (scmd)
6098 scsi_print_command(scmd);
6099 h->last_collision_tag = idx;
6100 }
6101 return NULL;
6102 }
6103
6104 atomic_inc(&c->refcount);
6105
6106 hpsa_cmd_partial_init(h, idx, c);
6107 return c;
6108 }
6109
6110 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6111 {
6112
6113
6114
6115
6116 (void)atomic_dec(&c->refcount);
6117 }
6118
6119
6120
6121
6122
6123
6124
6125
6126
6127
6128 static struct CommandList *cmd_alloc(struct ctlr_info *h)
6129 {
6130 struct CommandList *c;
6131 int refcount, i;
6132 int offset = 0;
6133
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143
6144
6145
6146
6147
6148
6149
6150
6151
6152
6153 for (;;) {
6154 i = find_next_zero_bit(h->cmd_pool_bits,
6155 HPSA_NRESERVED_CMDS,
6156 offset);
6157 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6158 offset = 0;
6159 continue;
6160 }
6161 c = h->cmd_pool + i;
6162 refcount = atomic_inc_return(&c->refcount);
6163 if (unlikely(refcount > 1)) {
6164 cmd_free(h, c);
6165 offset = (i + 1) % HPSA_NRESERVED_CMDS;
6166 continue;
6167 }
6168 set_bit(i & (BITS_PER_LONG - 1),
6169 h->cmd_pool_bits + (i / BITS_PER_LONG));
6170 break;
6171 }
6172 hpsa_cmd_partial_init(h, i, c);
6173 c->device = NULL;
6174 return c;
6175 }
6176
6177
6178
6179
6180
6181
6182
6183 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6184 {
6185 if (atomic_dec_and_test(&c->refcount)) {
6186 int i;
6187
6188 i = c - h->cmd_pool;
6189 clear_bit(i & (BITS_PER_LONG - 1),
6190 h->cmd_pool_bits + (i / BITS_PER_LONG));
6191 }
6192 }
6193
6194 #ifdef CONFIG_COMPAT
6195
6196 static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
6197 void __user *arg)
6198 {
6199 IOCTL32_Command_struct __user *arg32 =
6200 (IOCTL32_Command_struct __user *) arg;
6201 IOCTL_Command_struct arg64;
6202 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
6203 int err;
6204 u32 cp;
6205
6206 memset(&arg64, 0, sizeof(arg64));
6207 err = 0;
6208 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6209 sizeof(arg64.LUN_info));
6210 err |= copy_from_user(&arg64.Request, &arg32->Request,
6211 sizeof(arg64.Request));
6212 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6213 sizeof(arg64.error_info));
6214 err |= get_user(arg64.buf_size, &arg32->buf_size);
6215 err |= get_user(cp, &arg32->buf);
6216 arg64.buf = compat_ptr(cp);
6217 err |= copy_to_user(p, &arg64, sizeof(arg64));
6218
6219 if (err)
6220 return -EFAULT;
6221
6222 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
6223 if (err)
6224 return err;
6225 err |= copy_in_user(&arg32->error_info, &p->error_info,
6226 sizeof(arg32->error_info));
6227 if (err)
6228 return -EFAULT;
6229 return err;
6230 }
6231
6232 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6233 unsigned int cmd, void __user *arg)
6234 {
6235 BIG_IOCTL32_Command_struct __user *arg32 =
6236 (BIG_IOCTL32_Command_struct __user *) arg;
6237 BIG_IOCTL_Command_struct arg64;
6238 BIG_IOCTL_Command_struct __user *p =
6239 compat_alloc_user_space(sizeof(arg64));
6240 int err;
6241 u32 cp;
6242
6243 memset(&arg64, 0, sizeof(arg64));
6244 err = 0;
6245 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6246 sizeof(arg64.LUN_info));
6247 err |= copy_from_user(&arg64.Request, &arg32->Request,
6248 sizeof(arg64.Request));
6249 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6250 sizeof(arg64.error_info));
6251 err |= get_user(arg64.buf_size, &arg32->buf_size);
6252 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
6253 err |= get_user(cp, &arg32->buf);
6254 arg64.buf = compat_ptr(cp);
6255 err |= copy_to_user(p, &arg64, sizeof(arg64));
6256
6257 if (err)
6258 return -EFAULT;
6259
6260 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
6261 if (err)
6262 return err;
6263 err |= copy_in_user(&arg32->error_info, &p->error_info,
6264 sizeof(arg32->error_info));
6265 if (err)
6266 return -EFAULT;
6267 return err;
6268 }
6269
6270 static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
6271 void __user *arg)
6272 {
6273 switch (cmd) {
6274 case CCISS_GETPCIINFO:
6275 case CCISS_GETINTINFO:
6276 case CCISS_SETINTINFO:
6277 case CCISS_GETNODENAME:
6278 case CCISS_SETNODENAME:
6279 case CCISS_GETHEARTBEAT:
6280 case CCISS_GETBUSTYPES:
6281 case CCISS_GETFIRMVER:
6282 case CCISS_GETDRIVVER:
6283 case CCISS_REVALIDVOLS:
6284 case CCISS_DEREGDISK:
6285 case CCISS_REGNEWDISK:
6286 case CCISS_REGNEWD:
6287 case CCISS_RESCANDISK:
6288 case CCISS_GETLUNINFO:
6289 return hpsa_ioctl(dev, cmd, arg);
6290
6291 case CCISS_PASSTHRU32:
6292 return hpsa_ioctl32_passthru(dev, cmd, arg);
6293 case CCISS_BIG_PASSTHRU32:
6294 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6295
6296 default:
6297 return -ENOIOCTLCMD;
6298 }
6299 }
6300 #endif
6301
6302 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6303 {
6304 struct hpsa_pci_info pciinfo;
6305
6306 if (!argp)
6307 return -EINVAL;
6308 pciinfo.domain = pci_domain_nr(h->pdev->bus);
6309 pciinfo.bus = h->pdev->bus->number;
6310 pciinfo.dev_fn = h->pdev->devfn;
6311 pciinfo.board_id = h->board_id;
6312 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6313 return -EFAULT;
6314 return 0;
6315 }
6316
6317 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6318 {
6319 DriverVer_type DriverVer;
6320 unsigned char vmaj, vmin, vsubmin;
6321 int rc;
6322
6323 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6324 &vmaj, &vmin, &vsubmin);
6325 if (rc != 3) {
6326 dev_info(&h->pdev->dev, "driver version string '%s' "
6327 "unrecognized.", HPSA_DRIVER_VERSION);
6328 vmaj = 0;
6329 vmin = 0;
6330 vsubmin = 0;
6331 }
6332 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6333 if (!argp)
6334 return -EINVAL;
6335 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6336 return -EFAULT;
6337 return 0;
6338 }
6339
6340 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6341 {
6342 IOCTL_Command_struct iocommand;
6343 struct CommandList *c;
6344 char *buff = NULL;
6345 u64 temp64;
6346 int rc = 0;
6347
6348 if (!argp)
6349 return -EINVAL;
6350 if (!capable(CAP_SYS_RAWIO))
6351 return -EPERM;
6352 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6353 return -EFAULT;
6354 if ((iocommand.buf_size < 1) &&
6355 (iocommand.Request.Type.Direction != XFER_NONE)) {
6356 return -EINVAL;
6357 }
6358 if (iocommand.buf_size > 0) {
6359 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6360 if (buff == NULL)
6361 return -ENOMEM;
6362 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6363
6364 if (copy_from_user(buff, iocommand.buf,
6365 iocommand.buf_size)) {
6366 rc = -EFAULT;
6367 goto out_kfree;
6368 }
6369 } else {
6370 memset(buff, 0, iocommand.buf_size);
6371 }
6372 }
6373 c = cmd_alloc(h);
6374
6375
6376 c->cmd_type = CMD_IOCTL_PEND;
6377 c->scsi_cmd = SCSI_CMD_BUSY;
6378
6379 c->Header.ReplyQueue = 0;
6380 if (iocommand.buf_size > 0) {
6381 c->Header.SGList = 1;
6382 c->Header.SGTotal = cpu_to_le16(1);
6383 } else {
6384 c->Header.SGList = 0;
6385 c->Header.SGTotal = cpu_to_le16(0);
6386 }
6387 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6388
6389
6390 memcpy(&c->Request, &iocommand.Request,
6391 sizeof(c->Request));
6392
6393
6394 if (iocommand.buf_size > 0) {
6395 temp64 = dma_map_single(&h->pdev->dev, buff,
6396 iocommand.buf_size, DMA_BIDIRECTIONAL);
6397 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6398 c->SG[0].Addr = cpu_to_le64(0);
6399 c->SG[0].Len = cpu_to_le32(0);
6400 rc = -ENOMEM;
6401 goto out;
6402 }
6403 c->SG[0].Addr = cpu_to_le64(temp64);
6404 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6405 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST);
6406 }
6407 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6408 NO_TIMEOUT);
6409 if (iocommand.buf_size > 0)
6410 hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
6411 check_ioctl_unit_attention(h, c);
6412 if (rc) {
6413 rc = -EIO;
6414 goto out;
6415 }
6416
6417
6418 memcpy(&iocommand.error_info, c->err_info,
6419 sizeof(iocommand.error_info));
6420 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6421 rc = -EFAULT;
6422 goto out;
6423 }
6424 if ((iocommand.Request.Type.Direction & XFER_READ) &&
6425 iocommand.buf_size > 0) {
6426
6427 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
6428 rc = -EFAULT;
6429 goto out;
6430 }
6431 }
6432 out:
6433 cmd_free(h, c);
6434 out_kfree:
6435 kfree(buff);
6436 return rc;
6437 }
6438
6439 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6440 {
6441 BIG_IOCTL_Command_struct *ioc;
6442 struct CommandList *c;
6443 unsigned char **buff = NULL;
6444 int *buff_size = NULL;
6445 u64 temp64;
6446 BYTE sg_used = 0;
6447 int status = 0;
6448 u32 left;
6449 u32 sz;
6450 BYTE __user *data_ptr;
6451
6452 if (!argp)
6453 return -EINVAL;
6454 if (!capable(CAP_SYS_RAWIO))
6455 return -EPERM;
6456 ioc = vmemdup_user(argp, sizeof(*ioc));
6457 if (IS_ERR(ioc)) {
6458 status = PTR_ERR(ioc);
6459 goto cleanup1;
6460 }
6461 if ((ioc->buf_size < 1) &&
6462 (ioc->Request.Type.Direction != XFER_NONE)) {
6463 status = -EINVAL;
6464 goto cleanup1;
6465 }
6466
6467 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6468 status = -EINVAL;
6469 goto cleanup1;
6470 }
6471 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6472 status = -EINVAL;
6473 goto cleanup1;
6474 }
6475 buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
6476 if (!buff) {
6477 status = -ENOMEM;
6478 goto cleanup1;
6479 }
6480 buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
6481 if (!buff_size) {
6482 status = -ENOMEM;
6483 goto cleanup1;
6484 }
6485 left = ioc->buf_size;
6486 data_ptr = ioc->buf;
6487 while (left) {
6488 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6489 buff_size[sg_used] = sz;
6490 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6491 if (buff[sg_used] == NULL) {
6492 status = -ENOMEM;
6493 goto cleanup1;
6494 }
6495 if (ioc->Request.Type.Direction & XFER_WRITE) {
6496 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6497 status = -EFAULT;
6498 goto cleanup1;
6499 }
6500 } else
6501 memset(buff[sg_used], 0, sz);
6502 left -= sz;
6503 data_ptr += sz;
6504 sg_used++;
6505 }
6506 c = cmd_alloc(h);
6507
6508 c->cmd_type = CMD_IOCTL_PEND;
6509 c->scsi_cmd = SCSI_CMD_BUSY;
6510 c->Header.ReplyQueue = 0;
6511 c->Header.SGList = (u8) sg_used;
6512 c->Header.SGTotal = cpu_to_le16(sg_used);
6513 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6514 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6515 if (ioc->buf_size > 0) {
6516 int i;
6517 for (i = 0; i < sg_used; i++) {
6518 temp64 = dma_map_single(&h->pdev->dev, buff[i],
6519 buff_size[i], DMA_BIDIRECTIONAL);
6520 if (dma_mapping_error(&h->pdev->dev,
6521 (dma_addr_t) temp64)) {
6522 c->SG[i].Addr = cpu_to_le64(0);
6523 c->SG[i].Len = cpu_to_le32(0);
6524 hpsa_pci_unmap(h->pdev, c, i,
6525 DMA_BIDIRECTIONAL);
6526 status = -ENOMEM;
6527 goto cleanup0;
6528 }
6529 c->SG[i].Addr = cpu_to_le64(temp64);
6530 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6531 c->SG[i].Ext = cpu_to_le32(0);
6532 }
6533 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6534 }
6535 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6536 NO_TIMEOUT);
6537 if (sg_used)
6538 hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
6539 check_ioctl_unit_attention(h, c);
6540 if (status) {
6541 status = -EIO;
6542 goto cleanup0;
6543 }
6544
6545
6546 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6547 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6548 status = -EFAULT;
6549 goto cleanup0;
6550 }
6551 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6552 int i;
6553
6554
6555 BYTE __user *ptr = ioc->buf;
6556 for (i = 0; i < sg_used; i++) {
6557 if (copy_to_user(ptr, buff[i], buff_size[i])) {
6558 status = -EFAULT;
6559 goto cleanup0;
6560 }
6561 ptr += buff_size[i];
6562 }
6563 }
6564 status = 0;
6565 cleanup0:
6566 cmd_free(h, c);
6567 cleanup1:
6568 if (buff) {
6569 int i;
6570
6571 for (i = 0; i < sg_used; i++)
6572 kfree(buff[i]);
6573 kfree(buff);
6574 }
6575 kfree(buff_size);
6576 kvfree(ioc);
6577 return status;
6578 }
6579
6580 static void check_ioctl_unit_attention(struct ctlr_info *h,
6581 struct CommandList *c)
6582 {
6583 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6584 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6585 (void) check_for_unit_attention(h, c);
6586 }
6587
6588
6589
6590
6591 static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
6592 void __user *arg)
6593 {
6594 struct ctlr_info *h;
6595 void __user *argp = (void __user *)arg;
6596 int rc;
6597
6598 h = sdev_to_hba(dev);
6599
6600 switch (cmd) {
6601 case CCISS_DEREGDISK:
6602 case CCISS_REGNEWDISK:
6603 case CCISS_REGNEWD:
6604 hpsa_scan_start(h->scsi_host);
6605 return 0;
6606 case CCISS_GETPCIINFO:
6607 return hpsa_getpciinfo_ioctl(h, argp);
6608 case CCISS_GETDRIVVER:
6609 return hpsa_getdrivver_ioctl(h, argp);
6610 case CCISS_PASSTHRU:
6611 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6612 return -EAGAIN;
6613 rc = hpsa_passthru_ioctl(h, argp);
6614 atomic_inc(&h->passthru_cmds_avail);
6615 return rc;
6616 case CCISS_BIG_PASSTHRU:
6617 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6618 return -EAGAIN;
6619 rc = hpsa_big_passthru_ioctl(h, argp);
6620 atomic_inc(&h->passthru_cmds_avail);
6621 return rc;
6622 default:
6623 return -ENOTTY;
6624 }
6625 }
6626
6627 static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
6628 {
6629 struct CommandList *c;
6630
6631 c = cmd_alloc(h);
6632
6633
6634 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6635 RAID_CTLR_LUNID, TYPE_MSG);
6636 c->Request.CDB[1] = reset_type;
6637 c->waiting = NULL;
6638 enqueue_cmd_and_start_io(h, c);
6639
6640
6641
6642
6643 return;
6644 }
6645
6646 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6647 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6648 int cmd_type)
6649 {
6650 enum dma_data_direction dir = DMA_NONE;
6651
6652 c->cmd_type = CMD_IOCTL_PEND;
6653 c->scsi_cmd = SCSI_CMD_BUSY;
6654 c->Header.ReplyQueue = 0;
6655 if (buff != NULL && size > 0) {
6656 c->Header.SGList = 1;
6657 c->Header.SGTotal = cpu_to_le16(1);
6658 } else {
6659 c->Header.SGList = 0;
6660 c->Header.SGTotal = cpu_to_le16(0);
6661 }
6662 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6663
6664 if (cmd_type == TYPE_CMD) {
6665 switch (cmd) {
6666 case HPSA_INQUIRY:
6667
6668 if (page_code & VPD_PAGE) {
6669 c->Request.CDB[1] = 0x01;
6670 c->Request.CDB[2] = (page_code & 0xff);
6671 }
6672 c->Request.CDBLen = 6;
6673 c->Request.type_attr_dir =
6674 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6675 c->Request.Timeout = 0;
6676 c->Request.CDB[0] = HPSA_INQUIRY;
6677 c->Request.CDB[4] = size & 0xFF;
6678 break;
6679 case RECEIVE_DIAGNOSTIC:
6680 c->Request.CDBLen = 6;
6681 c->Request.type_attr_dir =
6682 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6683 c->Request.Timeout = 0;
6684 c->Request.CDB[0] = cmd;
6685 c->Request.CDB[1] = 1;
6686 c->Request.CDB[2] = 1;
6687 c->Request.CDB[3] = (size >> 8) & 0xFF;
6688 c->Request.CDB[4] = size & 0xFF;
6689 break;
6690 case HPSA_REPORT_LOG:
6691 case HPSA_REPORT_PHYS:
6692
6693
6694
6695 c->Request.CDBLen = 12;
6696 c->Request.type_attr_dir =
6697 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6698 c->Request.Timeout = 0;
6699 c->Request.CDB[0] = cmd;
6700 c->Request.CDB[6] = (size >> 24) & 0xFF;
6701 c->Request.CDB[7] = (size >> 16) & 0xFF;
6702 c->Request.CDB[8] = (size >> 8) & 0xFF;
6703 c->Request.CDB[9] = size & 0xFF;
6704 break;
6705 case BMIC_SENSE_DIAG_OPTIONS:
6706 c->Request.CDBLen = 16;
6707 c->Request.type_attr_dir =
6708 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6709 c->Request.Timeout = 0;
6710
6711 c->Request.CDB[0] = BMIC_READ;
6712 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6713 break;
6714 case BMIC_SET_DIAG_OPTIONS:
6715 c->Request.CDBLen = 16;
6716 c->Request.type_attr_dir =
6717 TYPE_ATTR_DIR(cmd_type,
6718 ATTR_SIMPLE, XFER_WRITE);
6719 c->Request.Timeout = 0;
6720 c->Request.CDB[0] = BMIC_WRITE;
6721 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6722 break;
6723 case HPSA_CACHE_FLUSH:
6724 c->Request.CDBLen = 12;
6725 c->Request.type_attr_dir =
6726 TYPE_ATTR_DIR(cmd_type,
6727 ATTR_SIMPLE, XFER_WRITE);
6728 c->Request.Timeout = 0;
6729 c->Request.CDB[0] = BMIC_WRITE;
6730 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6731 c->Request.CDB[7] = (size >> 8) & 0xFF;
6732 c->Request.CDB[8] = size & 0xFF;
6733 break;
6734 case TEST_UNIT_READY:
6735 c->Request.CDBLen = 6;
6736 c->Request.type_attr_dir =
6737 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6738 c->Request.Timeout = 0;
6739 break;
6740 case HPSA_GET_RAID_MAP:
6741 c->Request.CDBLen = 12;
6742 c->Request.type_attr_dir =
6743 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6744 c->Request.Timeout = 0;
6745 c->Request.CDB[0] = HPSA_CISS_READ;
6746 c->Request.CDB[1] = cmd;
6747 c->Request.CDB[6] = (size >> 24) & 0xFF;
6748 c->Request.CDB[7] = (size >> 16) & 0xFF;
6749 c->Request.CDB[8] = (size >> 8) & 0xFF;
6750 c->Request.CDB[9] = size & 0xFF;
6751 break;
6752 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6753 c->Request.CDBLen = 10;
6754 c->Request.type_attr_dir =
6755 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6756 c->Request.Timeout = 0;
6757 c->Request.CDB[0] = BMIC_READ;
6758 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6759 c->Request.CDB[7] = (size >> 16) & 0xFF;
6760 c->Request.CDB[8] = (size >> 8) & 0xFF;
6761 break;
6762 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6763 c->Request.CDBLen = 10;
6764 c->Request.type_attr_dir =
6765 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6766 c->Request.Timeout = 0;
6767 c->Request.CDB[0] = BMIC_READ;
6768 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6769 c->Request.CDB[7] = (size >> 16) & 0xFF;
6770 c->Request.CDB[8] = (size >> 8) & 0XFF;
6771 break;
6772 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6773 c->Request.CDBLen = 10;
6774 c->Request.type_attr_dir =
6775 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6776 c->Request.Timeout = 0;
6777 c->Request.CDB[0] = BMIC_READ;
6778 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6779 c->Request.CDB[7] = (size >> 16) & 0xFF;
6780 c->Request.CDB[8] = (size >> 8) & 0XFF;
6781 break;
6782 case BMIC_SENSE_STORAGE_BOX_PARAMS:
6783 c->Request.CDBLen = 10;
6784 c->Request.type_attr_dir =
6785 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6786 c->Request.Timeout = 0;
6787 c->Request.CDB[0] = BMIC_READ;
6788 c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
6789 c->Request.CDB[7] = (size >> 16) & 0xFF;
6790 c->Request.CDB[8] = (size >> 8) & 0XFF;
6791 break;
6792 case BMIC_IDENTIFY_CONTROLLER:
6793 c->Request.CDBLen = 10;
6794 c->Request.type_attr_dir =
6795 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6796 c->Request.Timeout = 0;
6797 c->Request.CDB[0] = BMIC_READ;
6798 c->Request.CDB[1] = 0;
6799 c->Request.CDB[2] = 0;
6800 c->Request.CDB[3] = 0;
6801 c->Request.CDB[4] = 0;
6802 c->Request.CDB[5] = 0;
6803 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6804 c->Request.CDB[7] = (size >> 16) & 0xFF;
6805 c->Request.CDB[8] = (size >> 8) & 0XFF;
6806 c->Request.CDB[9] = 0;
6807 break;
6808 default:
6809 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
6810 BUG();
6811 }
6812 } else if (cmd_type == TYPE_MSG) {
6813 switch (cmd) {
6814
6815 case HPSA_PHYS_TARGET_RESET:
6816 c->Request.CDBLen = 16;
6817 c->Request.type_attr_dir =
6818 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6819 c->Request.Timeout = 0;
6820 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6821 c->Request.CDB[0] = HPSA_RESET;
6822 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6823
6824 c->Request.CDB[4] = 0x00;
6825 c->Request.CDB[5] = 0x00;
6826 c->Request.CDB[6] = 0x00;
6827 c->Request.CDB[7] = 0x00;
6828 break;
6829 case HPSA_DEVICE_RESET_MSG:
6830 c->Request.CDBLen = 16;
6831 c->Request.type_attr_dir =
6832 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6833 c->Request.Timeout = 0;
6834 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6835 c->Request.CDB[0] = cmd;
6836 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6837
6838
6839 c->Request.CDB[4] = 0x00;
6840 c->Request.CDB[5] = 0x00;
6841 c->Request.CDB[6] = 0x00;
6842 c->Request.CDB[7] = 0x00;
6843 break;
6844 default:
6845 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6846 cmd);
6847 BUG();
6848 }
6849 } else {
6850 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6851 BUG();
6852 }
6853
6854 switch (GET_DIR(c->Request.type_attr_dir)) {
6855 case XFER_READ:
6856 dir = DMA_FROM_DEVICE;
6857 break;
6858 case XFER_WRITE:
6859 dir = DMA_TO_DEVICE;
6860 break;
6861 case XFER_NONE:
6862 dir = DMA_NONE;
6863 break;
6864 default:
6865 dir = DMA_BIDIRECTIONAL;
6866 }
6867 if (hpsa_map_one(h->pdev, c, buff, size, dir))
6868 return -1;
6869 return 0;
6870 }
6871
6872
6873
6874
6875 static void __iomem *remap_pci_mem(ulong base, ulong size)
6876 {
6877 ulong page_base = ((ulong) base) & PAGE_MASK;
6878 ulong page_offs = ((ulong) base) - page_base;
6879 void __iomem *page_remapped = ioremap_nocache(page_base,
6880 page_offs + size);
6881
6882 return page_remapped ? (page_remapped + page_offs) : NULL;
6883 }
6884
6885 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6886 {
6887 return h->access.command_completed(h, q);
6888 }
6889
6890 static inline bool interrupt_pending(struct ctlr_info *h)
6891 {
6892 return h->access.intr_pending(h);
6893 }
6894
6895 static inline long interrupt_not_for_us(struct ctlr_info *h)
6896 {
6897 return (h->access.intr_pending(h) == 0) ||
6898 (h->interrupts_enabled == 0);
6899 }
6900
6901 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6902 u32 raw_tag)
6903 {
6904 if (unlikely(tag_index >= h->nr_cmds)) {
6905 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6906 return 1;
6907 }
6908 return 0;
6909 }
6910
6911 static inline void finish_cmd(struct CommandList *c)
6912 {
6913 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6914 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6915 || c->cmd_type == CMD_IOACCEL2))
6916 complete_scsi_command(c);
6917 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6918 complete(c->waiting);
6919 }
6920
6921
6922 static inline void process_indexed_cmd(struct ctlr_info *h,
6923 u32 raw_tag)
6924 {
6925 u32 tag_index;
6926 struct CommandList *c;
6927
6928 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6929 if (!bad_tag(h, tag_index, raw_tag)) {
6930 c = h->cmd_pool + tag_index;
6931 finish_cmd(c);
6932 }
6933 }
6934
6935
6936
6937
6938
6939
6940 static int ignore_bogus_interrupt(struct ctlr_info *h)
6941 {
6942 if (likely(!reset_devices))
6943 return 0;
6944
6945 if (likely(h->interrupts_enabled))
6946 return 0;
6947
6948 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6949 "(known firmware bug.) Ignoring.\n");
6950
6951 return 1;
6952 }
6953
6954
6955
6956
6957
6958
6959 static struct ctlr_info *queue_to_hba(u8 *queue)
6960 {
6961 return container_of((queue - *queue), struct ctlr_info, q[0]);
6962 }
6963
6964 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6965 {
6966 struct ctlr_info *h = queue_to_hba(queue);
6967 u8 q = *(u8 *) queue;
6968 u32 raw_tag;
6969
6970 if (ignore_bogus_interrupt(h))
6971 return IRQ_NONE;
6972
6973 if (interrupt_not_for_us(h))
6974 return IRQ_NONE;
6975 h->last_intr_timestamp = get_jiffies_64();
6976 while (interrupt_pending(h)) {
6977 raw_tag = get_next_completion(h, q);
6978 while (raw_tag != FIFO_EMPTY)
6979 raw_tag = next_command(h, q);
6980 }
6981 return IRQ_HANDLED;
6982 }
6983
6984 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
6985 {
6986 struct ctlr_info *h = queue_to_hba(queue);
6987 u32 raw_tag;
6988 u8 q = *(u8 *) queue;
6989
6990 if (ignore_bogus_interrupt(h))
6991 return IRQ_NONE;
6992
6993 h->last_intr_timestamp = get_jiffies_64();
6994 raw_tag = get_next_completion(h, q);
6995 while (raw_tag != FIFO_EMPTY)
6996 raw_tag = next_command(h, q);
6997 return IRQ_HANDLED;
6998 }
6999
7000 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
7001 {
7002 struct ctlr_info *h = queue_to_hba((u8 *) queue);
7003 u32 raw_tag;
7004 u8 q = *(u8 *) queue;
7005
7006 if (interrupt_not_for_us(h))
7007 return IRQ_NONE;
7008 h->last_intr_timestamp = get_jiffies_64();
7009 while (interrupt_pending(h)) {
7010 raw_tag = get_next_completion(h, q);
7011 while (raw_tag != FIFO_EMPTY) {
7012 process_indexed_cmd(h, raw_tag);
7013 raw_tag = next_command(h, q);
7014 }
7015 }
7016 return IRQ_HANDLED;
7017 }
7018
7019 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
7020 {
7021 struct ctlr_info *h = queue_to_hba(queue);
7022 u32 raw_tag;
7023 u8 q = *(u8 *) queue;
7024
7025 h->last_intr_timestamp = get_jiffies_64();
7026 raw_tag = get_next_completion(h, q);
7027 while (raw_tag != FIFO_EMPTY) {
7028 process_indexed_cmd(h, raw_tag);
7029 raw_tag = next_command(h, q);
7030 }
7031 return IRQ_HANDLED;
7032 }
7033
7034
7035
7036
7037
7038 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
7039 unsigned char type)
7040 {
7041 struct Command {
7042 struct CommandListHeader CommandHeader;
7043 struct RequestBlock Request;
7044 struct ErrDescriptor ErrorDescriptor;
7045 };
7046 struct Command *cmd;
7047 static const size_t cmd_sz = sizeof(*cmd) +
7048 sizeof(cmd->ErrorDescriptor);
7049 dma_addr_t paddr64;
7050 __le32 paddr32;
7051 u32 tag;
7052 void __iomem *vaddr;
7053 int i, err;
7054
7055 vaddr = pci_ioremap_bar(pdev, 0);
7056 if (vaddr == NULL)
7057 return -ENOMEM;
7058
7059
7060
7061
7062
7063 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7064 if (err) {
7065 iounmap(vaddr);
7066 return err;
7067 }
7068
7069 cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
7070 if (cmd == NULL) {
7071 iounmap(vaddr);
7072 return -ENOMEM;
7073 }
7074
7075
7076
7077
7078
7079 paddr32 = cpu_to_le32(paddr64);
7080
7081 cmd->CommandHeader.ReplyQueue = 0;
7082 cmd->CommandHeader.SGList = 0;
7083 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7084 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7085 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7086
7087 cmd->Request.CDBLen = 16;
7088 cmd->Request.type_attr_dir =
7089 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7090 cmd->Request.Timeout = 0;
7091 cmd->Request.CDB[0] = opcode;
7092 cmd->Request.CDB[1] = type;
7093 memset(&cmd->Request.CDB[2], 0, 14);
7094 cmd->ErrorDescriptor.Addr =
7095 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7096 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7097
7098 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7099
7100 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7101 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7102 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7103 break;
7104 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7105 }
7106
7107 iounmap(vaddr);
7108
7109
7110
7111
7112 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7113 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7114 opcode, type);
7115 return -ETIMEDOUT;
7116 }
7117
7118 dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
7119
7120 if (tag & HPSA_ERROR_BIT) {
7121 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7122 opcode, type);
7123 return -EIO;
7124 }
7125
7126 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7127 opcode, type);
7128 return 0;
7129 }
7130
7131 #define hpsa_noop(p) hpsa_message(p, 3, 0)
7132
7133 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7134 void __iomem *vaddr, u32 use_doorbell)
7135 {
7136
7137 if (use_doorbell) {
7138
7139
7140
7141
7142 dev_info(&pdev->dev, "using doorbell to reset controller\n");
7143 writel(use_doorbell, vaddr + SA5_DOORBELL);
7144
7145
7146
7147
7148
7149
7150 msleep(10000);
7151 } else {
7152
7153
7154
7155
7156
7157
7158
7159
7160
7161 int rc = 0;
7162
7163 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7164
7165
7166 rc = pci_set_power_state(pdev, PCI_D3hot);
7167 if (rc)
7168 return rc;
7169
7170 msleep(500);
7171
7172
7173 rc = pci_set_power_state(pdev, PCI_D0);
7174 if (rc)
7175 return rc;
7176
7177
7178
7179
7180
7181
7182 msleep(500);
7183 }
7184 return 0;
7185 }
7186
7187 static void init_driver_version(char *driver_version, int len)
7188 {
7189 memset(driver_version, 0, len);
7190 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7191 }
7192
7193 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7194 {
7195 char *driver_version;
7196 int i, size = sizeof(cfgtable->driver_version);
7197
7198 driver_version = kmalloc(size, GFP_KERNEL);
7199 if (!driver_version)
7200 return -ENOMEM;
7201
7202 init_driver_version(driver_version, size);
7203 for (i = 0; i < size; i++)
7204 writeb(driver_version[i], &cfgtable->driver_version[i]);
7205 kfree(driver_version);
7206 return 0;
7207 }
7208
7209 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7210 unsigned char *driver_ver)
7211 {
7212 int i;
7213
7214 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7215 driver_ver[i] = readb(&cfgtable->driver_version[i]);
7216 }
7217
7218 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7219 {
7220
7221 char *driver_ver, *old_driver_ver;
7222 int rc, size = sizeof(cfgtable->driver_version);
7223
7224 old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
7225 if (!old_driver_ver)
7226 return -ENOMEM;
7227 driver_ver = old_driver_ver + size;
7228
7229
7230
7231
7232 init_driver_version(old_driver_ver, size);
7233 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7234 rc = !memcmp(driver_ver, old_driver_ver, size);
7235 kfree(old_driver_ver);
7236 return rc;
7237 }
7238
7239
7240
7241 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7242 {
7243 u64 cfg_offset;
7244 u32 cfg_base_addr;
7245 u64 cfg_base_addr_index;
7246 void __iomem *vaddr;
7247 unsigned long paddr;
7248 u32 misc_fw_support;
7249 int rc;
7250 struct CfgTable __iomem *cfgtable;
7251 u32 use_doorbell;
7252 u16 command_register;
7253
7254
7255
7256
7257
7258
7259
7260
7261
7262
7263
7264
7265
7266
7267 if (!ctlr_is_resettable(board_id)) {
7268 dev_warn(&pdev->dev, "Controller not resettable\n");
7269 return -ENODEV;
7270 }
7271
7272
7273 if (!ctlr_is_hard_resettable(board_id))
7274 return -ENOTSUPP;
7275
7276
7277 pci_read_config_word(pdev, 4, &command_register);
7278 pci_save_state(pdev);
7279
7280
7281 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7282 if (rc)
7283 return rc;
7284 vaddr = remap_pci_mem(paddr, 0x250);
7285 if (!vaddr)
7286 return -ENOMEM;
7287
7288
7289 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7290 &cfg_base_addr_index, &cfg_offset);
7291 if (rc)
7292 goto unmap_vaddr;
7293 cfgtable = remap_pci_mem(pci_resource_start(pdev,
7294 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7295 if (!cfgtable) {
7296 rc = -ENOMEM;
7297 goto unmap_vaddr;
7298 }
7299 rc = write_driver_ver_to_cfgtable(cfgtable);
7300 if (rc)
7301 goto unmap_cfgtable;
7302
7303
7304
7305
7306 misc_fw_support = readl(&cfgtable->misc_fw_support);
7307 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7308 if (use_doorbell) {
7309 use_doorbell = DOORBELL_CTLR_RESET2;
7310 } else {
7311 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7312 if (use_doorbell) {
7313 dev_warn(&pdev->dev,
7314 "Soft reset not supported. Firmware update is required.\n");
7315 rc = -ENOTSUPP;
7316 goto unmap_cfgtable;
7317 }
7318 }
7319
7320 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7321 if (rc)
7322 goto unmap_cfgtable;
7323
7324 pci_restore_state(pdev);
7325 pci_write_config_word(pdev, 4, command_register);
7326
7327
7328
7329 msleep(HPSA_POST_RESET_PAUSE_MSECS);
7330
7331 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7332 if (rc) {
7333 dev_warn(&pdev->dev,
7334 "Failed waiting for board to become ready after hard reset\n");
7335 goto unmap_cfgtable;
7336 }
7337
7338 rc = controller_reset_failed(vaddr);
7339 if (rc < 0)
7340 goto unmap_cfgtable;
7341 if (rc) {
7342 dev_warn(&pdev->dev, "Unable to successfully reset "
7343 "controller. Will try soft reset.\n");
7344 rc = -ENOTSUPP;
7345 } else {
7346 dev_info(&pdev->dev, "board ready after hard reset.\n");
7347 }
7348
7349 unmap_cfgtable:
7350 iounmap(cfgtable);
7351
7352 unmap_vaddr:
7353 iounmap(vaddr);
7354 return rc;
7355 }
7356
7357
7358
7359
7360
7361
7362 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7363 {
7364 #ifdef HPSA_DEBUG
7365 int i;
7366 char temp_name[17];
7367
7368 dev_info(dev, "Controller Configuration information\n");
7369 dev_info(dev, "------------------------------------\n");
7370 for (i = 0; i < 4; i++)
7371 temp_name[i] = readb(&(tb->Signature[i]));
7372 temp_name[4] = '\0';
7373 dev_info(dev, " Signature = %s\n", temp_name);
7374 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
7375 dev_info(dev, " Transport methods supported = 0x%x\n",
7376 readl(&(tb->TransportSupport)));
7377 dev_info(dev, " Transport methods active = 0x%x\n",
7378 readl(&(tb->TransportActive)));
7379 dev_info(dev, " Requested transport Method = 0x%x\n",
7380 readl(&(tb->HostWrite.TransportRequest)));
7381 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
7382 readl(&(tb->HostWrite.CoalIntDelay)));
7383 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
7384 readl(&(tb->HostWrite.CoalIntCount)));
7385 dev_info(dev, " Max outstanding commands = %d\n",
7386 readl(&(tb->CmdsOutMax)));
7387 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7388 for (i = 0; i < 16; i++)
7389 temp_name[i] = readb(&(tb->ServerName[i]));
7390 temp_name[16] = '\0';
7391 dev_info(dev, " Server Name = %s\n", temp_name);
7392 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
7393 readl(&(tb->HeartBeat)));
7394 #endif
7395 }
7396
7397 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7398 {
7399 int i, offset, mem_type, bar_type;
7400
7401 if (pci_bar_addr == PCI_BASE_ADDRESS_0)
7402 return 0;
7403 offset = 0;
7404 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7405 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7406 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7407 offset += 4;
7408 else {
7409 mem_type = pci_resource_flags(pdev, i) &
7410 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7411 switch (mem_type) {
7412 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7413 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7414 offset += 4;
7415 break;
7416 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7417 offset += 8;
7418 break;
7419 default:
7420 dev_warn(&pdev->dev,
7421 "base address is invalid\n");
7422 return -1;
7423 break;
7424 }
7425 }
7426 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7427 return i + 1;
7428 }
7429 return -1;
7430 }
7431
7432 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7433 {
7434 pci_free_irq_vectors(h->pdev);
7435 h->msix_vectors = 0;
7436 }
7437
7438 static void hpsa_setup_reply_map(struct ctlr_info *h)
7439 {
7440 const struct cpumask *mask;
7441 unsigned int queue, cpu;
7442
7443 for (queue = 0; queue < h->msix_vectors; queue++) {
7444 mask = pci_irq_get_affinity(h->pdev, queue);
7445 if (!mask)
7446 goto fallback;
7447
7448 for_each_cpu(cpu, mask)
7449 h->reply_map[cpu] = queue;
7450 }
7451 return;
7452
7453 fallback:
7454 for_each_possible_cpu(cpu)
7455 h->reply_map[cpu] = 0;
7456 }
7457
7458
7459
7460
7461 static int hpsa_interrupt_mode(struct ctlr_info *h)
7462 {
7463 unsigned int flags = PCI_IRQ_LEGACY;
7464 int ret;
7465
7466
7467 switch (h->board_id) {
7468 case 0x40700E11:
7469 case 0x40800E11:
7470 case 0x40820E11:
7471 case 0x40830E11:
7472 break;
7473 default:
7474 ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
7475 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
7476 if (ret > 0) {
7477 h->msix_vectors = ret;
7478 return 0;
7479 }
7480
7481 flags |= PCI_IRQ_MSI;
7482 break;
7483 }
7484
7485 ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
7486 if (ret < 0)
7487 return ret;
7488 return 0;
7489 }
7490
7491 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
7492 bool *legacy_board)
7493 {
7494 int i;
7495 u32 subsystem_vendor_id, subsystem_device_id;
7496
7497 subsystem_vendor_id = pdev->subsystem_vendor;
7498 subsystem_device_id = pdev->subsystem_device;
7499 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7500 subsystem_vendor_id;
7501
7502 if (legacy_board)
7503 *legacy_board = false;
7504 for (i = 0; i < ARRAY_SIZE(products); i++)
7505 if (*board_id == products[i].board_id) {
7506 if (products[i].access != &SA5A_access &&
7507 products[i].access != &SA5B_access)
7508 return i;
7509 dev_warn(&pdev->dev,
7510 "legacy board ID: 0x%08x\n",
7511 *board_id);
7512 if (legacy_board)
7513 *legacy_board = true;
7514 return i;
7515 }
7516
7517 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
7518 if (legacy_board)
7519 *legacy_board = true;
7520 return ARRAY_SIZE(products) - 1;
7521 }
7522
7523 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7524 unsigned long *memory_bar)
7525 {
7526 int i;
7527
7528 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7529 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7530
7531 *memory_bar = pci_resource_start(pdev, i);
7532 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7533 *memory_bar);
7534 return 0;
7535 }
7536 dev_warn(&pdev->dev, "no memory BAR found\n");
7537 return -ENODEV;
7538 }
7539
7540 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7541 int wait_for_ready)
7542 {
7543 int i, iterations;
7544 u32 scratchpad;
7545 if (wait_for_ready)
7546 iterations = HPSA_BOARD_READY_ITERATIONS;
7547 else
7548 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7549
7550 for (i = 0; i < iterations; i++) {
7551 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7552 if (wait_for_ready) {
7553 if (scratchpad == HPSA_FIRMWARE_READY)
7554 return 0;
7555 } else {
7556 if (scratchpad != HPSA_FIRMWARE_READY)
7557 return 0;
7558 }
7559 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7560 }
7561 dev_warn(&pdev->dev, "board not ready, timed out.\n");
7562 return -ENODEV;
7563 }
7564
7565 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7566 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7567 u64 *cfg_offset)
7568 {
7569 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7570 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7571 *cfg_base_addr &= (u32) 0x0000ffff;
7572 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7573 if (*cfg_base_addr_index == -1) {
7574 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7575 return -ENODEV;
7576 }
7577 return 0;
7578 }
7579
7580 static void hpsa_free_cfgtables(struct ctlr_info *h)
7581 {
7582 if (h->transtable) {
7583 iounmap(h->transtable);
7584 h->transtable = NULL;
7585 }
7586 if (h->cfgtable) {
7587 iounmap(h->cfgtable);
7588 h->cfgtable = NULL;
7589 }
7590 }
7591
7592
7593
7594
7595 static int hpsa_find_cfgtables(struct ctlr_info *h)
7596 {
7597 u64 cfg_offset;
7598 u32 cfg_base_addr;
7599 u64 cfg_base_addr_index;
7600 u32 trans_offset;
7601 int rc;
7602
7603 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7604 &cfg_base_addr_index, &cfg_offset);
7605 if (rc)
7606 return rc;
7607 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7608 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7609 if (!h->cfgtable) {
7610 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7611 return -ENOMEM;
7612 }
7613 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7614 if (rc)
7615 return rc;
7616
7617 trans_offset = readl(&h->cfgtable->TransMethodOffset);
7618 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7619 cfg_base_addr_index)+cfg_offset+trans_offset,
7620 sizeof(*h->transtable));
7621 if (!h->transtable) {
7622 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7623 hpsa_free_cfgtables(h);
7624 return -ENOMEM;
7625 }
7626 return 0;
7627 }
7628
7629 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7630 {
7631 #define MIN_MAX_COMMANDS 16
7632 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7633
7634 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7635
7636
7637 if (reset_devices && h->max_commands > 32)
7638 h->max_commands = 32;
7639
7640 if (h->max_commands < MIN_MAX_COMMANDS) {
7641 dev_warn(&h->pdev->dev,
7642 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7643 h->max_commands,
7644 MIN_MAX_COMMANDS);
7645 h->max_commands = MIN_MAX_COMMANDS;
7646 }
7647 }
7648
7649
7650
7651
7652
7653 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7654 {
7655 return h->maxsgentries > 512;
7656 }
7657
7658
7659
7660
7661
7662 static void hpsa_find_board_params(struct ctlr_info *h)
7663 {
7664 hpsa_get_max_perf_mode_cmds(h);
7665 h->nr_cmds = h->max_commands;
7666 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7667 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7668 if (hpsa_supports_chained_sg_blocks(h)) {
7669
7670 h->max_cmd_sg_entries = 32;
7671 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7672 h->maxsgentries--;
7673 } else {
7674
7675
7676
7677
7678
7679 h->max_cmd_sg_entries = 31;
7680 h->maxsgentries = 31;
7681 h->chainsize = 0;
7682 }
7683
7684
7685 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7686 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7687 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7688 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7689 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7690 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7691 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7692 }
7693
7694 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7695 {
7696 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7697 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7698 return false;
7699 }
7700 return true;
7701 }
7702
7703 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7704 {
7705 u32 driver_support;
7706
7707 driver_support = readl(&(h->cfgtable->driver_support));
7708
7709 #ifdef CONFIG_X86
7710 driver_support |= ENABLE_SCSI_PREFETCH;
7711 #endif
7712 driver_support |= ENABLE_UNIT_ATTN;
7713 writel(driver_support, &(h->cfgtable->driver_support));
7714 }
7715
7716
7717
7718
7719 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7720 {
7721 u32 dma_prefetch;
7722
7723 if (h->board_id != 0x3225103C)
7724 return;
7725 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7726 dma_prefetch |= 0x8000;
7727 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7728 }
7729
7730 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7731 {
7732 int i;
7733 u32 doorbell_value;
7734 unsigned long flags;
7735
7736 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7737 spin_lock_irqsave(&h->lock, flags);
7738 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7739 spin_unlock_irqrestore(&h->lock, flags);
7740 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7741 goto done;
7742
7743 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7744 }
7745 return -ENODEV;
7746 done:
7747 return 0;
7748 }
7749
7750 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7751 {
7752 int i;
7753 u32 doorbell_value;
7754 unsigned long flags;
7755
7756
7757
7758
7759
7760 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7761 if (h->remove_in_progress)
7762 goto done;
7763 spin_lock_irqsave(&h->lock, flags);
7764 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7765 spin_unlock_irqrestore(&h->lock, flags);
7766 if (!(doorbell_value & CFGTBL_ChangeReq))
7767 goto done;
7768
7769 msleep(MODE_CHANGE_WAIT_INTERVAL);
7770 }
7771 return -ENODEV;
7772 done:
7773 return 0;
7774 }
7775
7776
7777 static int hpsa_enter_simple_mode(struct ctlr_info *h)
7778 {
7779 u32 trans_support;
7780
7781 trans_support = readl(&(h->cfgtable->TransportSupport));
7782 if (!(trans_support & SIMPLE_MODE))
7783 return -ENOTSUPP;
7784
7785 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7786
7787
7788 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7789 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7790 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7791 if (hpsa_wait_for_mode_change_ack(h))
7792 goto error;
7793 print_cfg_table(&h->pdev->dev, h->cfgtable);
7794 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7795 goto error;
7796 h->transMethod = CFGTBL_Trans_Simple;
7797 return 0;
7798 error:
7799 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7800 return -ENODEV;
7801 }
7802
7803
7804 static void hpsa_free_pci_init(struct ctlr_info *h)
7805 {
7806 hpsa_free_cfgtables(h);
7807 iounmap(h->vaddr);
7808 h->vaddr = NULL;
7809 hpsa_disable_interrupt_mode(h);
7810
7811
7812
7813
7814 pci_disable_device(h->pdev);
7815 pci_release_regions(h->pdev);
7816 }
7817
7818
7819 static int hpsa_pci_init(struct ctlr_info *h)
7820 {
7821 int prod_index, err;
7822 bool legacy_board;
7823
7824 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
7825 if (prod_index < 0)
7826 return prod_index;
7827 h->product_name = products[prod_index].product_name;
7828 h->access = *(products[prod_index].access);
7829 h->legacy_board = legacy_board;
7830 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7831 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7832
7833 err = pci_enable_device(h->pdev);
7834 if (err) {
7835 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7836 pci_disable_device(h->pdev);
7837 return err;
7838 }
7839
7840 err = pci_request_regions(h->pdev, HPSA);
7841 if (err) {
7842 dev_err(&h->pdev->dev,
7843 "failed to obtain PCI resources\n");
7844 pci_disable_device(h->pdev);
7845 return err;
7846 }
7847
7848 pci_set_master(h->pdev);
7849
7850 err = hpsa_interrupt_mode(h);
7851 if (err)
7852 goto clean1;
7853
7854
7855 hpsa_setup_reply_map(h);
7856
7857 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7858 if (err)
7859 goto clean2;
7860 h->vaddr = remap_pci_mem(h->paddr, 0x250);
7861 if (!h->vaddr) {
7862 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7863 err = -ENOMEM;
7864 goto clean2;
7865 }
7866 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7867 if (err)
7868 goto clean3;
7869 err = hpsa_find_cfgtables(h);
7870 if (err)
7871 goto clean3;
7872 hpsa_find_board_params(h);
7873
7874 if (!hpsa_CISS_signature_present(h)) {
7875 err = -ENODEV;
7876 goto clean4;
7877 }
7878 hpsa_set_driver_support_bits(h);
7879 hpsa_p600_dma_prefetch_quirk(h);
7880 err = hpsa_enter_simple_mode(h);
7881 if (err)
7882 goto clean4;
7883 return 0;
7884
7885 clean4:
7886 hpsa_free_cfgtables(h);
7887 clean3:
7888 iounmap(h->vaddr);
7889 h->vaddr = NULL;
7890 clean2:
7891 hpsa_disable_interrupt_mode(h);
7892 clean1:
7893
7894
7895
7896
7897 pci_disable_device(h->pdev);
7898 pci_release_regions(h->pdev);
7899 return err;
7900 }
7901
7902 static void hpsa_hba_inquiry(struct ctlr_info *h)
7903 {
7904 int rc;
7905
7906 #define HBA_INQUIRY_BYTE_COUNT 64
7907 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7908 if (!h->hba_inquiry_data)
7909 return;
7910 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7911 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7912 if (rc != 0) {
7913 kfree(h->hba_inquiry_data);
7914 h->hba_inquiry_data = NULL;
7915 }
7916 }
7917
7918 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7919 {
7920 int rc, i;
7921 void __iomem *vaddr;
7922
7923 if (!reset_devices)
7924 return 0;
7925
7926
7927
7928
7929
7930 rc = pci_enable_device(pdev);
7931 if (rc) {
7932 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7933 return -ENODEV;
7934 }
7935 pci_disable_device(pdev);
7936 msleep(260);
7937 rc = pci_enable_device(pdev);
7938 if (rc) {
7939 dev_warn(&pdev->dev, "failed to enable device.\n");
7940 return -ENODEV;
7941 }
7942
7943 pci_set_master(pdev);
7944
7945 vaddr = pci_ioremap_bar(pdev, 0);
7946 if (vaddr == NULL) {
7947 rc = -ENOMEM;
7948 goto out_disable;
7949 }
7950 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7951 iounmap(vaddr);
7952
7953
7954 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7955
7956
7957
7958
7959
7960
7961 if (rc)
7962 goto out_disable;
7963
7964
7965 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7966 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7967 if (hpsa_noop(pdev) == 0)
7968 break;
7969 else
7970 dev_warn(&pdev->dev, "no-op failed%s\n",
7971 (i < 11 ? "; re-trying" : ""));
7972 }
7973
7974 out_disable:
7975
7976 pci_disable_device(pdev);
7977 return rc;
7978 }
7979
7980 static void hpsa_free_cmd_pool(struct ctlr_info *h)
7981 {
7982 kfree(h->cmd_pool_bits);
7983 h->cmd_pool_bits = NULL;
7984 if (h->cmd_pool) {
7985 dma_free_coherent(&h->pdev->dev,
7986 h->nr_cmds * sizeof(struct CommandList),
7987 h->cmd_pool,
7988 h->cmd_pool_dhandle);
7989 h->cmd_pool = NULL;
7990 h->cmd_pool_dhandle = 0;
7991 }
7992 if (h->errinfo_pool) {
7993 dma_free_coherent(&h->pdev->dev,
7994 h->nr_cmds * sizeof(struct ErrorInfo),
7995 h->errinfo_pool,
7996 h->errinfo_pool_dhandle);
7997 h->errinfo_pool = NULL;
7998 h->errinfo_pool_dhandle = 0;
7999 }
8000 }
8001
8002 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
8003 {
8004 h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
8005 sizeof(unsigned long),
8006 GFP_KERNEL);
8007 h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
8008 h->nr_cmds * sizeof(*h->cmd_pool),
8009 &h->cmd_pool_dhandle, GFP_KERNEL);
8010 h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
8011 h->nr_cmds * sizeof(*h->errinfo_pool),
8012 &h->errinfo_pool_dhandle, GFP_KERNEL);
8013 if ((h->cmd_pool_bits == NULL)
8014 || (h->cmd_pool == NULL)
8015 || (h->errinfo_pool == NULL)) {
8016 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
8017 goto clean_up;
8018 }
8019 hpsa_preinitialize_commands(h);
8020 return 0;
8021 clean_up:
8022 hpsa_free_cmd_pool(h);
8023 return -ENOMEM;
8024 }
8025
8026
8027 static void hpsa_free_irqs(struct ctlr_info *h)
8028 {
8029 int i;
8030 int irq_vector = 0;
8031
8032 if (hpsa_simple_mode)
8033 irq_vector = h->intr_mode;
8034
8035 if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
8036
8037 free_irq(pci_irq_vector(h->pdev, irq_vector),
8038 &h->q[h->intr_mode]);
8039 h->q[h->intr_mode] = 0;
8040 return;
8041 }
8042
8043 for (i = 0; i < h->msix_vectors; i++) {
8044 free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
8045 h->q[i] = 0;
8046 }
8047 for (; i < MAX_REPLY_QUEUES; i++)
8048 h->q[i] = 0;
8049 }
8050
8051
8052 static int hpsa_request_irqs(struct ctlr_info *h,
8053 irqreturn_t (*msixhandler)(int, void *),
8054 irqreturn_t (*intxhandler)(int, void *))
8055 {
8056 int rc, i;
8057 int irq_vector = 0;
8058
8059 if (hpsa_simple_mode)
8060 irq_vector = h->intr_mode;
8061
8062
8063
8064
8065
8066 for (i = 0; i < MAX_REPLY_QUEUES; i++)
8067 h->q[i] = (u8) i;
8068
8069 if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
8070
8071 for (i = 0; i < h->msix_vectors; i++) {
8072 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8073 rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
8074 0, h->intrname[i],
8075 &h->q[i]);
8076 if (rc) {
8077 int j;
8078
8079 dev_err(&h->pdev->dev,
8080 "failed to get irq %d for %s\n",
8081 pci_irq_vector(h->pdev, i), h->devname);
8082 for (j = 0; j < i; j++) {
8083 free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
8084 h->q[j] = 0;
8085 }
8086 for (; j < MAX_REPLY_QUEUES; j++)
8087 h->q[j] = 0;
8088 return rc;
8089 }
8090 }
8091 } else {
8092
8093 if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
8094 sprintf(h->intrname[0], "%s-msi%s", h->devname,
8095 h->msix_vectors ? "x" : "");
8096 rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8097 msixhandler, 0,
8098 h->intrname[0],
8099 &h->q[h->intr_mode]);
8100 } else {
8101 sprintf(h->intrname[h->intr_mode],
8102 "%s-intx", h->devname);
8103 rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8104 intxhandler, IRQF_SHARED,
8105 h->intrname[0],
8106 &h->q[h->intr_mode]);
8107 }
8108 }
8109 if (rc) {
8110 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8111 pci_irq_vector(h->pdev, irq_vector), h->devname);
8112 hpsa_free_irqs(h);
8113 return -ENODEV;
8114 }
8115 return 0;
8116 }
8117
8118 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8119 {
8120 int rc;
8121 hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);
8122
8123 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8124 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8125 if (rc) {
8126 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8127 return rc;
8128 }
8129
8130 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8131 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8132 if (rc) {
8133 dev_warn(&h->pdev->dev, "Board failed to become ready "
8134 "after soft reset.\n");
8135 return rc;
8136 }
8137
8138 return 0;
8139 }
8140
8141 static void hpsa_free_reply_queues(struct ctlr_info *h)
8142 {
8143 int i;
8144
8145 for (i = 0; i < h->nreply_queues; i++) {
8146 if (!h->reply_queue[i].head)
8147 continue;
8148 dma_free_coherent(&h->pdev->dev,
8149 h->reply_queue_size,
8150 h->reply_queue[i].head,
8151 h->reply_queue[i].busaddr);
8152 h->reply_queue[i].head = NULL;
8153 h->reply_queue[i].busaddr = 0;
8154 }
8155 h->reply_queue_size = 0;
8156 }
8157
8158 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8159 {
8160 hpsa_free_performant_mode(h);
8161 hpsa_free_sg_chain_blocks(h);
8162 hpsa_free_cmd_pool(h);
8163 hpsa_free_irqs(h);
8164 scsi_host_put(h->scsi_host);
8165 h->scsi_host = NULL;
8166 hpsa_free_pci_init(h);
8167 free_percpu(h->lockup_detected);
8168 h->lockup_detected = NULL;
8169 if (h->resubmit_wq) {
8170 destroy_workqueue(h->resubmit_wq);
8171 h->resubmit_wq = NULL;
8172 }
8173 if (h->rescan_ctlr_wq) {
8174 destroy_workqueue(h->rescan_ctlr_wq);
8175 h->rescan_ctlr_wq = NULL;
8176 }
8177 if (h->monitor_ctlr_wq) {
8178 destroy_workqueue(h->monitor_ctlr_wq);
8179 h->monitor_ctlr_wq = NULL;
8180 }
8181
8182 kfree(h);
8183 }
8184
8185
8186 static void fail_all_outstanding_cmds(struct ctlr_info *h)
8187 {
8188 int i, refcount;
8189 struct CommandList *c;
8190 int failcount = 0;
8191
8192 flush_workqueue(h->resubmit_wq);
8193 for (i = 0; i < h->nr_cmds; i++) {
8194 c = h->cmd_pool + i;
8195 refcount = atomic_inc_return(&c->refcount);
8196 if (refcount > 1) {
8197 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8198 finish_cmd(c);
8199 atomic_dec(&h->commands_outstanding);
8200 failcount++;
8201 }
8202 cmd_free(h, c);
8203 }
8204 dev_warn(&h->pdev->dev,
8205 "failed %d commands in fail_all\n", failcount);
8206 }
8207
8208 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8209 {
8210 int cpu;
8211
8212 for_each_online_cpu(cpu) {
8213 u32 *lockup_detected;
8214 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8215 *lockup_detected = value;
8216 }
8217 wmb();
8218 }
8219
8220 static void controller_lockup_detected(struct ctlr_info *h)
8221 {
8222 unsigned long flags;
8223 u32 lockup_detected;
8224
8225 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8226 spin_lock_irqsave(&h->lock, flags);
8227 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8228 if (!lockup_detected) {
8229
8230 dev_warn(&h->pdev->dev,
8231 "lockup detected after %d but scratchpad register is zero\n",
8232 h->heartbeat_sample_interval / HZ);
8233 lockup_detected = 0xffffffff;
8234 }
8235 set_lockup_detected_for_all_cpus(h, lockup_detected);
8236 spin_unlock_irqrestore(&h->lock, flags);
8237 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
8238 lockup_detected, h->heartbeat_sample_interval / HZ);
8239 if (lockup_detected == 0xffff0000) {
8240 dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
8241 writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
8242 }
8243 pci_disable_device(h->pdev);
8244 fail_all_outstanding_cmds(h);
8245 }
8246
8247 static int detect_controller_lockup(struct ctlr_info *h)
8248 {
8249 u64 now;
8250 u32 heartbeat;
8251 unsigned long flags;
8252
8253 now = get_jiffies_64();
8254
8255 if (time_after64(h->last_intr_timestamp +
8256 (h->heartbeat_sample_interval), now))
8257 return false;
8258
8259
8260
8261
8262
8263
8264 if (time_after64(h->last_heartbeat_timestamp +
8265 (h->heartbeat_sample_interval), now))
8266 return false;
8267
8268
8269 spin_lock_irqsave(&h->lock, flags);
8270 heartbeat = readl(&h->cfgtable->HeartBeat);
8271 spin_unlock_irqrestore(&h->lock, flags);
8272 if (h->last_heartbeat == heartbeat) {
8273 controller_lockup_detected(h);
8274 return true;
8275 }
8276
8277
8278 h->last_heartbeat = heartbeat;
8279 h->last_heartbeat_timestamp = now;
8280 return false;
8281 }
8282
8283
8284
8285
8286
8287
8288
8289
8290
8291
8292 static void hpsa_set_ioaccel_status(struct ctlr_info *h)
8293 {
8294 int rc;
8295 int i;
8296 u8 ioaccel_status;
8297 unsigned char *buf;
8298 struct hpsa_scsi_dev_t *device;
8299
8300 if (!h)
8301 return;
8302
8303 buf = kmalloc(64, GFP_KERNEL);
8304 if (!buf)
8305 return;
8306
8307
8308
8309
8310 for (i = 0; i < h->ndevices; i++) {
8311 device = h->dev[i];
8312
8313 if (!device)
8314 continue;
8315 if (!hpsa_vpd_page_supported(h, device->scsi3addr,
8316 HPSA_VPD_LV_IOACCEL_STATUS))
8317 continue;
8318
8319 memset(buf, 0, 64);
8320
8321 rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
8322 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
8323 buf, 64);
8324 if (rc != 0)
8325 continue;
8326
8327 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
8328 device->offload_config =
8329 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
8330 if (device->offload_config)
8331 device->offload_to_be_enabled =
8332 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
8333
8334
8335
8336
8337
8338
8339
8340
8341
8342
8343
8344
8345 if (!device->offload_to_be_enabled)
8346 device->offload_enabled = 0;
8347 }
8348
8349 kfree(buf);
8350 }
8351
8352 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8353 {
8354 char *event_type;
8355
8356 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8357 return;
8358
8359
8360 if ((h->transMethod & (CFGTBL_Trans_io_accel1
8361 | CFGTBL_Trans_io_accel2)) &&
8362 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8363 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8364
8365 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8366 event_type = "state change";
8367 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8368 event_type = "configuration change";
8369
8370 scsi_block_requests(h->scsi_host);
8371 hpsa_set_ioaccel_status(h);
8372 hpsa_drain_accel_commands(h);
8373
8374 dev_warn(&h->pdev->dev,
8375 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8376 h->events, event_type);
8377 writel(h->events, &(h->cfgtable->clear_event_notify));
8378
8379 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8380
8381 hpsa_wait_for_clear_event_notify_ack(h);
8382 scsi_unblock_requests(h->scsi_host);
8383 } else {
8384
8385 writel(h->events, &(h->cfgtable->clear_event_notify));
8386 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8387 hpsa_wait_for_clear_event_notify_ack(h);
8388 }
8389 return;
8390 }
8391
8392
8393
8394
8395
8396
8397 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8398 {
8399 if (h->drv_req_rescan) {
8400 h->drv_req_rescan = 0;
8401 return 1;
8402 }
8403
8404 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8405 return 0;
8406
8407 h->events = readl(&(h->cfgtable->event_notify));
8408 return h->events & RESCAN_REQUIRED_EVENT_BITS;
8409 }
8410
8411
8412
8413
8414 static int hpsa_offline_devices_ready(struct ctlr_info *h)
8415 {
8416 unsigned long flags;
8417 struct offline_device_entry *d;
8418 struct list_head *this, *tmp;
8419
8420 spin_lock_irqsave(&h->offline_device_lock, flags);
8421 list_for_each_safe(this, tmp, &h->offline_device_list) {
8422 d = list_entry(this, struct offline_device_entry,
8423 offline_list);
8424 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8425 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8426 spin_lock_irqsave(&h->offline_device_lock, flags);
8427 list_del(&d->offline_list);
8428 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8429 return 1;
8430 }
8431 spin_lock_irqsave(&h->offline_device_lock, flags);
8432 }
8433 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8434 return 0;
8435 }
8436
8437 static int hpsa_luns_changed(struct ctlr_info *h)
8438 {
8439 int rc = 1;
8440 struct ReportLUNdata *logdev = NULL;
8441
8442
8443
8444
8445
8446 if (!h->lastlogicals)
8447 return rc;
8448
8449 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8450 if (!logdev)
8451 return rc;
8452
8453 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8454 dev_warn(&h->pdev->dev,
8455 "report luns failed, can't track lun changes.\n");
8456 goto out;
8457 }
8458 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8459 dev_info(&h->pdev->dev,
8460 "Lun changes detected.\n");
8461 memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8462 goto out;
8463 } else
8464 rc = 0;
8465 out:
8466 kfree(logdev);
8467 return rc;
8468 }
8469
8470 static void hpsa_perform_rescan(struct ctlr_info *h)
8471 {
8472 struct Scsi_Host *sh = NULL;
8473 unsigned long flags;
8474
8475
8476
8477
8478 spin_lock_irqsave(&h->reset_lock, flags);
8479 if (h->reset_in_progress) {
8480 h->drv_req_rescan = 1;
8481 spin_unlock_irqrestore(&h->reset_lock, flags);
8482 return;
8483 }
8484 spin_unlock_irqrestore(&h->reset_lock, flags);
8485
8486 sh = scsi_host_get(h->scsi_host);
8487 if (sh != NULL) {
8488 hpsa_scan_start(sh);
8489 scsi_host_put(sh);
8490 h->drv_req_rescan = 0;
8491 }
8492 }
8493
8494
8495
8496
8497 static void hpsa_event_monitor_worker(struct work_struct *work)
8498 {
8499 struct ctlr_info *h = container_of(to_delayed_work(work),
8500 struct ctlr_info, event_monitor_work);
8501 unsigned long flags;
8502
8503 spin_lock_irqsave(&h->lock, flags);
8504 if (h->remove_in_progress) {
8505 spin_unlock_irqrestore(&h->lock, flags);
8506 return;
8507 }
8508 spin_unlock_irqrestore(&h->lock, flags);
8509
8510 if (hpsa_ctlr_needs_rescan(h)) {
8511 hpsa_ack_ctlr_events(h);
8512 hpsa_perform_rescan(h);
8513 }
8514
8515 spin_lock_irqsave(&h->lock, flags);
8516 if (!h->remove_in_progress)
8517 queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
8518 HPSA_EVENT_MONITOR_INTERVAL);
8519 spin_unlock_irqrestore(&h->lock, flags);
8520 }
8521
8522 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8523 {
8524 unsigned long flags;
8525 struct ctlr_info *h = container_of(to_delayed_work(work),
8526 struct ctlr_info, rescan_ctlr_work);
8527
8528 spin_lock_irqsave(&h->lock, flags);
8529 if (h->remove_in_progress) {
8530 spin_unlock_irqrestore(&h->lock, flags);
8531 return;
8532 }
8533 spin_unlock_irqrestore(&h->lock, flags);
8534
8535 if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
8536 hpsa_perform_rescan(h);
8537 } else if (h->discovery_polling) {
8538 if (hpsa_luns_changed(h)) {
8539 dev_info(&h->pdev->dev,
8540 "driver discovery polling rescan.\n");
8541 hpsa_perform_rescan(h);
8542 }
8543 }
8544 spin_lock_irqsave(&h->lock, flags);
8545 if (!h->remove_in_progress)
8546 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8547 h->heartbeat_sample_interval);
8548 spin_unlock_irqrestore(&h->lock, flags);
8549 }
8550
8551 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8552 {
8553 unsigned long flags;
8554 struct ctlr_info *h = container_of(to_delayed_work(work),
8555 struct ctlr_info, monitor_ctlr_work);
8556
8557 detect_controller_lockup(h);
8558 if (lockup_detected(h))
8559 return;
8560
8561 spin_lock_irqsave(&h->lock, flags);
8562 if (!h->remove_in_progress)
8563 queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
8564 h->heartbeat_sample_interval);
8565 spin_unlock_irqrestore(&h->lock, flags);
8566 }
8567
8568 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8569 char *name)
8570 {
8571 struct workqueue_struct *wq = NULL;
8572
8573 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8574 if (!wq)
8575 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8576
8577 return wq;
8578 }
8579
8580 static void hpda_free_ctlr_info(struct ctlr_info *h)
8581 {
8582 kfree(h->reply_map);
8583 kfree(h);
8584 }
8585
8586 static struct ctlr_info *hpda_alloc_ctlr_info(void)
8587 {
8588 struct ctlr_info *h;
8589
8590 h = kzalloc(sizeof(*h), GFP_KERNEL);
8591 if (!h)
8592 return NULL;
8593
8594 h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
8595 if (!h->reply_map) {
8596 kfree(h);
8597 return NULL;
8598 }
8599 return h;
8600 }
8601
8602 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8603 {
8604 int dac, rc;
8605 struct ctlr_info *h;
8606 int try_soft_reset = 0;
8607 unsigned long flags;
8608 u32 board_id;
8609
8610 if (number_of_controllers == 0)
8611 printk(KERN_INFO DRIVER_NAME "\n");
8612
8613 rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
8614 if (rc < 0) {
8615 dev_warn(&pdev->dev, "Board ID not found\n");
8616 return rc;
8617 }
8618
8619 rc = hpsa_init_reset_devices(pdev, board_id);
8620 if (rc) {
8621 if (rc != -ENOTSUPP)
8622 return rc;
8623
8624
8625
8626
8627
8628 try_soft_reset = 1;
8629 rc = 0;
8630 }
8631
8632 reinit_after_soft_reset:
8633
8634
8635
8636
8637
8638 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8639 h = hpda_alloc_ctlr_info();
8640 if (!h) {
8641 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8642 return -ENOMEM;
8643 }
8644
8645 h->pdev = pdev;
8646
8647 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8648 INIT_LIST_HEAD(&h->offline_device_list);
8649 spin_lock_init(&h->lock);
8650 spin_lock_init(&h->offline_device_lock);
8651 spin_lock_init(&h->scan_lock);
8652 spin_lock_init(&h->reset_lock);
8653 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8654
8655
8656 h->lockup_detected = alloc_percpu(u32);
8657 if (!h->lockup_detected) {
8658 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8659 rc = -ENOMEM;
8660 goto clean1;
8661 }
8662 set_lockup_detected_for_all_cpus(h, 0);
8663
8664 rc = hpsa_pci_init(h);
8665 if (rc)
8666 goto clean2;
8667
8668
8669
8670 rc = hpsa_scsi_host_alloc(h);
8671 if (rc)
8672 goto clean2_5;
8673
8674 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8675 h->ctlr = number_of_controllers;
8676 number_of_controllers++;
8677
8678
8679 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
8680 if (rc == 0) {
8681 dac = 1;
8682 } else {
8683 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
8684 if (rc == 0) {
8685 dac = 0;
8686 } else {
8687 dev_err(&pdev->dev, "no suitable DMA available\n");
8688 goto clean3;
8689 }
8690 }
8691
8692
8693 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8694
8695 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8696 if (rc)
8697 goto clean3;
8698 rc = hpsa_alloc_cmd_pool(h);
8699 if (rc)
8700 goto clean4;
8701 rc = hpsa_alloc_sg_chain_blocks(h);
8702 if (rc)
8703 goto clean5;
8704 init_waitqueue_head(&h->scan_wait_queue);
8705 init_waitqueue_head(&h->event_sync_wait_queue);
8706 mutex_init(&h->reset_mutex);
8707 h->scan_finished = 1;
8708 h->scan_waiting = 0;
8709
8710 pci_set_drvdata(pdev, h);
8711 h->ndevices = 0;
8712
8713 spin_lock_init(&h->devlock);
8714 rc = hpsa_put_ctlr_into_performant_mode(h);
8715 if (rc)
8716 goto clean6;
8717
8718
8719 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8720 if (!h->rescan_ctlr_wq) {
8721 rc = -ENOMEM;
8722 goto clean7;
8723 }
8724
8725 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8726 if (!h->resubmit_wq) {
8727 rc = -ENOMEM;
8728 goto clean7;
8729 }
8730
8731 h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
8732 if (!h->monitor_ctlr_wq) {
8733 rc = -ENOMEM;
8734 goto clean7;
8735 }
8736
8737
8738
8739
8740
8741
8742 if (try_soft_reset) {
8743
8744
8745
8746
8747
8748
8749
8750
8751 spin_lock_irqsave(&h->lock, flags);
8752 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8753 spin_unlock_irqrestore(&h->lock, flags);
8754 hpsa_free_irqs(h);
8755 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8756 hpsa_intx_discard_completions);
8757 if (rc) {
8758 dev_warn(&h->pdev->dev,
8759 "Failed to request_irq after soft reset.\n");
8760
8761
8762
8763
8764 hpsa_free_performant_mode(h);
8765 hpsa_free_sg_chain_blocks(h);
8766 hpsa_free_cmd_pool(h);
8767
8768
8769
8770
8771 goto clean3;
8772 }
8773
8774 rc = hpsa_kdump_soft_reset(h);
8775 if (rc)
8776
8777 goto clean7;
8778
8779 dev_info(&h->pdev->dev, "Board READY.\n");
8780 dev_info(&h->pdev->dev,
8781 "Waiting for stale completions to drain.\n");
8782 h->access.set_intr_mask(h, HPSA_INTR_ON);
8783 msleep(10000);
8784 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8785
8786 rc = controller_reset_failed(h->cfgtable);
8787 if (rc)
8788 dev_info(&h->pdev->dev,
8789 "Soft reset appears to have failed.\n");
8790
8791
8792
8793
8794
8795 hpsa_undo_allocations_after_kdump_soft_reset(h);
8796 try_soft_reset = 0;
8797 if (rc)
8798
8799 return -ENODEV;
8800
8801 goto reinit_after_soft_reset;
8802 }
8803
8804
8805 h->acciopath_status = 1;
8806
8807 h->discovery_polling = 0;
8808
8809
8810
8811 h->access.set_intr_mask(h, HPSA_INTR_ON);
8812
8813 hpsa_hba_inquiry(h);
8814
8815 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8816 if (!h->lastlogicals)
8817 dev_info(&h->pdev->dev,
8818 "Can't track change to report lun data\n");
8819
8820
8821 rc = hpsa_scsi_add_host(h);
8822 if (rc)
8823 goto clean7;
8824
8825
8826 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8827 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8828 schedule_delayed_work(&h->monitor_ctlr_work,
8829 h->heartbeat_sample_interval);
8830 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8831 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8832 h->heartbeat_sample_interval);
8833 INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
8834 schedule_delayed_work(&h->event_monitor_work,
8835 HPSA_EVENT_MONITOR_INTERVAL);
8836 return 0;
8837
8838 clean7:
8839 hpsa_free_performant_mode(h);
8840 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8841 clean6:
8842 hpsa_free_sg_chain_blocks(h);
8843 clean5:
8844 hpsa_free_cmd_pool(h);
8845 clean4:
8846 hpsa_free_irqs(h);
8847 clean3:
8848 scsi_host_put(h->scsi_host);
8849 h->scsi_host = NULL;
8850 clean2_5:
8851 hpsa_free_pci_init(h);
8852 clean2:
8853 if (h->lockup_detected) {
8854 free_percpu(h->lockup_detected);
8855 h->lockup_detected = NULL;
8856 }
8857 clean1:
8858 if (h->resubmit_wq) {
8859 destroy_workqueue(h->resubmit_wq);
8860 h->resubmit_wq = NULL;
8861 }
8862 if (h->rescan_ctlr_wq) {
8863 destroy_workqueue(h->rescan_ctlr_wq);
8864 h->rescan_ctlr_wq = NULL;
8865 }
8866 if (h->monitor_ctlr_wq) {
8867 destroy_workqueue(h->monitor_ctlr_wq);
8868 h->monitor_ctlr_wq = NULL;
8869 }
8870 kfree(h);
8871 return rc;
8872 }
8873
8874 static void hpsa_flush_cache(struct ctlr_info *h)
8875 {
8876 char *flush_buf;
8877 struct CommandList *c;
8878 int rc;
8879
8880 if (unlikely(lockup_detected(h)))
8881 return;
8882 flush_buf = kzalloc(4, GFP_KERNEL);
8883 if (!flush_buf)
8884 return;
8885
8886 c = cmd_alloc(h);
8887
8888 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8889 RAID_CTLR_LUNID, TYPE_CMD)) {
8890 goto out;
8891 }
8892 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8893 DEFAULT_TIMEOUT);
8894 if (rc)
8895 goto out;
8896 if (c->err_info->CommandStatus != 0)
8897 out:
8898 dev_warn(&h->pdev->dev,
8899 "error flushing cache on controller\n");
8900 cmd_free(h, c);
8901 kfree(flush_buf);
8902 }
8903
8904
8905
8906
8907 static void hpsa_disable_rld_caching(struct ctlr_info *h)
8908 {
8909 u32 *options;
8910 struct CommandList *c;
8911 int rc;
8912
8913
8914 if (unlikely(h->lockup_detected))
8915 return;
8916
8917 options = kzalloc(sizeof(*options), GFP_KERNEL);
8918 if (!options)
8919 return;
8920
8921 c = cmd_alloc(h);
8922
8923
8924 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8925 RAID_CTLR_LUNID, TYPE_CMD))
8926 goto errout;
8927
8928 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8929 NO_TIMEOUT);
8930 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8931 goto errout;
8932
8933
8934 *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
8935
8936 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
8937 RAID_CTLR_LUNID, TYPE_CMD))
8938 goto errout;
8939
8940 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8941 NO_TIMEOUT);
8942 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8943 goto errout;
8944
8945
8946 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8947 RAID_CTLR_LUNID, TYPE_CMD))
8948 goto errout;
8949
8950 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8951 NO_TIMEOUT);
8952 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8953 goto errout;
8954
8955 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
8956 goto out;
8957
8958 errout:
8959 dev_err(&h->pdev->dev,
8960 "Error: failed to disable report lun data caching.\n");
8961 out:
8962 cmd_free(h, c);
8963 kfree(options);
8964 }
8965
8966 static void __hpsa_shutdown(struct pci_dev *pdev)
8967 {
8968 struct ctlr_info *h;
8969
8970 h = pci_get_drvdata(pdev);
8971
8972
8973
8974
8975 hpsa_flush_cache(h);
8976 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8977 hpsa_free_irqs(h);
8978 hpsa_disable_interrupt_mode(h);
8979 }
8980
8981 static void hpsa_shutdown(struct pci_dev *pdev)
8982 {
8983 __hpsa_shutdown(pdev);
8984 pci_disable_device(pdev);
8985 }
8986
8987 static void hpsa_free_device_info(struct ctlr_info *h)
8988 {
8989 int i;
8990
8991 for (i = 0; i < h->ndevices; i++) {
8992 kfree(h->dev[i]);
8993 h->dev[i] = NULL;
8994 }
8995 }
8996
8997 static void hpsa_remove_one(struct pci_dev *pdev)
8998 {
8999 struct ctlr_info *h;
9000 unsigned long flags;
9001
9002 if (pci_get_drvdata(pdev) == NULL) {
9003 dev_err(&pdev->dev, "unable to remove device\n");
9004 return;
9005 }
9006 h = pci_get_drvdata(pdev);
9007
9008
9009 spin_lock_irqsave(&h->lock, flags);
9010 h->remove_in_progress = 1;
9011 spin_unlock_irqrestore(&h->lock, flags);
9012 cancel_delayed_work_sync(&h->monitor_ctlr_work);
9013 cancel_delayed_work_sync(&h->rescan_ctlr_work);
9014 cancel_delayed_work_sync(&h->event_monitor_work);
9015 destroy_workqueue(h->rescan_ctlr_wq);
9016 destroy_workqueue(h->resubmit_wq);
9017 destroy_workqueue(h->monitor_ctlr_wq);
9018
9019 hpsa_delete_sas_host(h);
9020
9021
9022
9023
9024
9025
9026
9027 if (h->scsi_host)
9028 scsi_remove_host(h->scsi_host);
9029
9030
9031 __hpsa_shutdown(pdev);
9032
9033 hpsa_free_device_info(h);
9034
9035 kfree(h->hba_inquiry_data);
9036 h->hba_inquiry_data = NULL;
9037 hpsa_free_ioaccel2_sg_chain_blocks(h);
9038 hpsa_free_performant_mode(h);
9039 hpsa_free_sg_chain_blocks(h);
9040 hpsa_free_cmd_pool(h);
9041 kfree(h->lastlogicals);
9042
9043
9044
9045 scsi_host_put(h->scsi_host);
9046 h->scsi_host = NULL;
9047
9048
9049 hpsa_free_pci_init(h);
9050
9051 free_percpu(h->lockup_detected);
9052 h->lockup_detected = NULL;
9053
9054
9055 hpda_free_ctlr_info(h);
9056 }
9057
9058 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
9059 __attribute__((unused)) pm_message_t state)
9060 {
9061 return -ENOSYS;
9062 }
9063
9064 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
9065 {
9066 return -ENOSYS;
9067 }
9068
9069 static struct pci_driver hpsa_pci_driver = {
9070 .name = HPSA,
9071 .probe = hpsa_init_one,
9072 .remove = hpsa_remove_one,
9073 .id_table = hpsa_pci_device_id,
9074 .shutdown = hpsa_shutdown,
9075 .suspend = hpsa_suspend,
9076 .resume = hpsa_resume,
9077 };
9078
9079
9080
9081
9082
9083
9084
9085
9086
9087
9088
9089
9090
9091 static void calc_bucket_map(int bucket[], int num_buckets,
9092 int nsgs, int min_blocks, u32 *bucket_map)
9093 {
9094 int i, j, b, size;
9095
9096
9097 for (i = 0; i <= nsgs; i++) {
9098
9099 size = i + min_blocks;
9100 b = num_buckets;
9101
9102 for (j = 0; j < num_buckets; j++) {
9103 if (bucket[j] >= size) {
9104 b = j;
9105 break;
9106 }
9107 }
9108
9109 bucket_map[i] = b;
9110 }
9111 }
9112
9113
9114
9115
9116
9117 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
9118 {
9119 int i;
9120 unsigned long register_value;
9121 unsigned long transMethod = CFGTBL_Trans_Performant |
9122 (trans_support & CFGTBL_Trans_use_short_tags) |
9123 CFGTBL_Trans_enable_directed_msix |
9124 (trans_support & (CFGTBL_Trans_io_accel1 |
9125 CFGTBL_Trans_io_accel2));
9126 struct access_method access = SA5_performant_access;
9127
9128
9129
9130
9131
9132
9133
9134
9135
9136
9137
9138
9139
9140
9141
9142
9143
9144
9145 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
9146 #define MIN_IOACCEL2_BFT_ENTRY 5
9147 #define HPSA_IOACCEL2_HEADER_SZ 4
9148 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
9149 13, 14, 15, 16, 17, 18, 19,
9150 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
9151 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
9152 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
9153 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
9154 16 * MIN_IOACCEL2_BFT_ENTRY);
9155 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
9156 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
9157
9158
9159
9160
9161
9162
9163
9164
9165
9166
9167 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
9168 access = SA5_performant_access_no_read;
9169
9170
9171 for (i = 0; i < h->nreply_queues; i++)
9172 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
9173
9174 bft[7] = SG_ENTRIES_IN_CMD + 4;
9175 calc_bucket_map(bft, ARRAY_SIZE(bft),
9176 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
9177 for (i = 0; i < 8; i++)
9178 writel(bft[i], &h->transtable->BlockFetch[i]);
9179
9180
9181 writel(h->max_commands, &h->transtable->RepQSize);
9182 writel(h->nreply_queues, &h->transtable->RepQCount);
9183 writel(0, &h->transtable->RepQCtrAddrLow32);
9184 writel(0, &h->transtable->RepQCtrAddrHigh32);
9185
9186 for (i = 0; i < h->nreply_queues; i++) {
9187 writel(0, &h->transtable->RepQAddr[i].upper);
9188 writel(h->reply_queue[i].busaddr,
9189 &h->transtable->RepQAddr[i].lower);
9190 }
9191
9192 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
9193 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
9194
9195
9196
9197 if (trans_support & CFGTBL_Trans_io_accel1) {
9198 access = SA5_ioaccel_mode1_access;
9199 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9200 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9201 } else
9202 if (trans_support & CFGTBL_Trans_io_accel2)
9203 access = SA5_ioaccel_mode2_access;
9204 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9205 if (hpsa_wait_for_mode_change_ack(h)) {
9206 dev_err(&h->pdev->dev,
9207 "performant mode problem - doorbell timeout\n");
9208 return -ENODEV;
9209 }
9210 register_value = readl(&(h->cfgtable->TransportActive));
9211 if (!(register_value & CFGTBL_Trans_Performant)) {
9212 dev_err(&h->pdev->dev,
9213 "performant mode problem - transport not active\n");
9214 return -ENODEV;
9215 }
9216
9217 h->access = access;
9218 h->transMethod = transMethod;
9219
9220 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
9221 (trans_support & CFGTBL_Trans_io_accel2)))
9222 return 0;
9223
9224 if (trans_support & CFGTBL_Trans_io_accel1) {
9225
9226 for (i = 0; i < h->nreply_queues; i++) {
9227 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
9228 h->reply_queue[i].current_entry =
9229 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
9230 }
9231 bft[7] = h->ioaccel_maxsg + 8;
9232 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
9233 h->ioaccel1_blockFetchTable);
9234
9235
9236 for (i = 0; i < h->nreply_queues; i++)
9237 memset(h->reply_queue[i].head,
9238 (u8) IOACCEL_MODE1_REPLY_UNUSED,
9239 h->reply_queue_size);
9240
9241
9242
9243
9244 for (i = 0; i < h->nr_cmds; i++) {
9245 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
9246
9247 cp->function = IOACCEL1_FUNCTION_SCSIIO;
9248 cp->err_info = (u32) (h->errinfo_pool_dhandle +
9249 (i * sizeof(struct ErrorInfo)));
9250 cp->err_info_len = sizeof(struct ErrorInfo);
9251 cp->sgl_offset = IOACCEL1_SGLOFFSET;
9252 cp->host_context_flags =
9253 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
9254 cp->timeout_sec = 0;
9255 cp->ReplyQueue = 0;
9256 cp->tag =
9257 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
9258 cp->host_addr =
9259 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
9260 (i * sizeof(struct io_accel1_cmd)));
9261 }
9262 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9263 u64 cfg_offset, cfg_base_addr_index;
9264 u32 bft2_offset, cfg_base_addr;
9265 int rc;
9266
9267 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
9268 &cfg_base_addr_index, &cfg_offset);
9269 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
9270 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
9271 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
9272 4, h->ioaccel2_blockFetchTable);
9273 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
9274 BUILD_BUG_ON(offsetof(struct CfgTable,
9275 io_accel_request_size_offset) != 0xb8);
9276 h->ioaccel2_bft2_regs =
9277 remap_pci_mem(pci_resource_start(h->pdev,
9278 cfg_base_addr_index) +
9279 cfg_offset + bft2_offset,
9280 ARRAY_SIZE(bft2) *
9281 sizeof(*h->ioaccel2_bft2_regs));
9282 for (i = 0; i < ARRAY_SIZE(bft2); i++)
9283 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
9284 }
9285 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9286 if (hpsa_wait_for_mode_change_ack(h)) {
9287 dev_err(&h->pdev->dev,
9288 "performant mode problem - enabling ioaccel mode\n");
9289 return -ENODEV;
9290 }
9291 return 0;
9292 }
9293
9294
9295 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9296 {
9297 if (h->ioaccel_cmd_pool) {
9298 pci_free_consistent(h->pdev,
9299 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9300 h->ioaccel_cmd_pool,
9301 h->ioaccel_cmd_pool_dhandle);
9302 h->ioaccel_cmd_pool = NULL;
9303 h->ioaccel_cmd_pool_dhandle = 0;
9304 }
9305 kfree(h->ioaccel1_blockFetchTable);
9306 h->ioaccel1_blockFetchTable = NULL;
9307 }
9308
9309
9310 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9311 {
9312 h->ioaccel_maxsg =
9313 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9314 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
9315 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
9316
9317
9318
9319
9320
9321 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9322 IOACCEL1_COMMANDLIST_ALIGNMENT);
9323 h->ioaccel_cmd_pool =
9324 dma_alloc_coherent(&h->pdev->dev,
9325 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9326 &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
9327
9328 h->ioaccel1_blockFetchTable =
9329 kmalloc(((h->ioaccel_maxsg + 1) *
9330 sizeof(u32)), GFP_KERNEL);
9331
9332 if ((h->ioaccel_cmd_pool == NULL) ||
9333 (h->ioaccel1_blockFetchTable == NULL))
9334 goto clean_up;
9335
9336 memset(h->ioaccel_cmd_pool, 0,
9337 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
9338 return 0;
9339
9340 clean_up:
9341 hpsa_free_ioaccel1_cmd_and_bft(h);
9342 return -ENOMEM;
9343 }
9344
9345
9346 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9347 {
9348 hpsa_free_ioaccel2_sg_chain_blocks(h);
9349
9350 if (h->ioaccel2_cmd_pool) {
9351 pci_free_consistent(h->pdev,
9352 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9353 h->ioaccel2_cmd_pool,
9354 h->ioaccel2_cmd_pool_dhandle);
9355 h->ioaccel2_cmd_pool = NULL;
9356 h->ioaccel2_cmd_pool_dhandle = 0;
9357 }
9358 kfree(h->ioaccel2_blockFetchTable);
9359 h->ioaccel2_blockFetchTable = NULL;
9360 }
9361
9362
9363 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9364 {
9365 int rc;
9366
9367
9368
9369 h->ioaccel_maxsg =
9370 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9371 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
9372 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
9373
9374 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9375 IOACCEL2_COMMANDLIST_ALIGNMENT);
9376 h->ioaccel2_cmd_pool =
9377 dma_alloc_coherent(&h->pdev->dev,
9378 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9379 &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
9380
9381 h->ioaccel2_blockFetchTable =
9382 kmalloc(((h->ioaccel_maxsg + 1) *
9383 sizeof(u32)), GFP_KERNEL);
9384
9385 if ((h->ioaccel2_cmd_pool == NULL) ||
9386 (h->ioaccel2_blockFetchTable == NULL)) {
9387 rc = -ENOMEM;
9388 goto clean_up;
9389 }
9390
9391 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
9392 if (rc)
9393 goto clean_up;
9394
9395 memset(h->ioaccel2_cmd_pool, 0,
9396 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
9397 return 0;
9398
9399 clean_up:
9400 hpsa_free_ioaccel2_cmd_and_bft(h);
9401 return rc;
9402 }
9403
9404
9405 static void hpsa_free_performant_mode(struct ctlr_info *h)
9406 {
9407 kfree(h->blockFetchTable);
9408 h->blockFetchTable = NULL;
9409 hpsa_free_reply_queues(h);
9410 hpsa_free_ioaccel1_cmd_and_bft(h);
9411 hpsa_free_ioaccel2_cmd_and_bft(h);
9412 }
9413
9414
9415
9416
9417 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9418 {
9419 u32 trans_support;
9420 unsigned long transMethod = CFGTBL_Trans_Performant |
9421 CFGTBL_Trans_use_short_tags;
9422 int i, rc;
9423
9424 if (hpsa_simple_mode)
9425 return 0;
9426
9427 trans_support = readl(&(h->cfgtable->TransportSupport));
9428 if (!(trans_support & PERFORMANT_MODE))
9429 return 0;
9430
9431
9432 if (trans_support & CFGTBL_Trans_io_accel1) {
9433 transMethod |= CFGTBL_Trans_io_accel1 |
9434 CFGTBL_Trans_enable_directed_msix;
9435 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
9436 if (rc)
9437 return rc;
9438 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9439 transMethod |= CFGTBL_Trans_io_accel2 |
9440 CFGTBL_Trans_enable_directed_msix;
9441 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
9442 if (rc)
9443 return rc;
9444 }
9445
9446 h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
9447 hpsa_get_max_perf_mode_cmds(h);
9448
9449 h->reply_queue_size = h->max_commands * sizeof(u64);
9450
9451 for (i = 0; i < h->nreply_queues; i++) {
9452 h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
9453 h->reply_queue_size,
9454 &h->reply_queue[i].busaddr,
9455 GFP_KERNEL);
9456 if (!h->reply_queue[i].head) {
9457 rc = -ENOMEM;
9458 goto clean1;
9459 }
9460 h->reply_queue[i].size = h->max_commands;
9461 h->reply_queue[i].wraparound = 1;
9462 h->reply_queue[i].current_entry = 0;
9463 }
9464
9465
9466 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
9467 sizeof(u32)), GFP_KERNEL);
9468 if (!h->blockFetchTable) {
9469 rc = -ENOMEM;
9470 goto clean1;
9471 }
9472
9473 rc = hpsa_enter_performant_mode(h, trans_support);
9474 if (rc)
9475 goto clean2;
9476 return 0;
9477
9478 clean2:
9479 kfree(h->blockFetchTable);
9480 h->blockFetchTable = NULL;
9481 clean1:
9482 hpsa_free_reply_queues(h);
9483 hpsa_free_ioaccel1_cmd_and_bft(h);
9484 hpsa_free_ioaccel2_cmd_and_bft(h);
9485 return rc;
9486 }
9487
9488 static int is_accelerated_cmd(struct CommandList *c)
9489 {
9490 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
9491 }
9492
9493 static void hpsa_drain_accel_commands(struct ctlr_info *h)
9494 {
9495 struct CommandList *c = NULL;
9496 int i, accel_cmds_out;
9497 int refcount;
9498
9499 do {
9500 accel_cmds_out = 0;
9501 for (i = 0; i < h->nr_cmds; i++) {
9502 c = h->cmd_pool + i;
9503 refcount = atomic_inc_return(&c->refcount);
9504 if (refcount > 1)
9505 accel_cmds_out += is_accelerated_cmd(c);
9506 cmd_free(h, c);
9507 }
9508 if (accel_cmds_out <= 0)
9509 break;
9510 msleep(100);
9511 } while (1);
9512 }
9513
9514 static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9515 struct hpsa_sas_port *hpsa_sas_port)
9516 {
9517 struct hpsa_sas_phy *hpsa_sas_phy;
9518 struct sas_phy *phy;
9519
9520 hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9521 if (!hpsa_sas_phy)
9522 return NULL;
9523
9524 phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9525 hpsa_sas_port->next_phy_index);
9526 if (!phy) {
9527 kfree(hpsa_sas_phy);
9528 return NULL;
9529 }
9530
9531 hpsa_sas_port->next_phy_index++;
9532 hpsa_sas_phy->phy = phy;
9533 hpsa_sas_phy->parent_port = hpsa_sas_port;
9534
9535 return hpsa_sas_phy;
9536 }
9537
9538 static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9539 {
9540 struct sas_phy *phy = hpsa_sas_phy->phy;
9541
9542 sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9543 if (hpsa_sas_phy->added_to_port)
9544 list_del(&hpsa_sas_phy->phy_list_entry);
9545 sas_phy_delete(phy);
9546 kfree(hpsa_sas_phy);
9547 }
9548
9549 static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9550 {
9551 int rc;
9552 struct hpsa_sas_port *hpsa_sas_port;
9553 struct sas_phy *phy;
9554 struct sas_identify *identify;
9555
9556 hpsa_sas_port = hpsa_sas_phy->parent_port;
9557 phy = hpsa_sas_phy->phy;
9558
9559 identify = &phy->identify;
9560 memset(identify, 0, sizeof(*identify));
9561 identify->sas_address = hpsa_sas_port->sas_address;
9562 identify->device_type = SAS_END_DEVICE;
9563 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9564 identify->target_port_protocols = SAS_PROTOCOL_STP;
9565 phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9566 phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9567 phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
9568 phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
9569 phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
9570
9571 rc = sas_phy_add(hpsa_sas_phy->phy);
9572 if (rc)
9573 return rc;
9574
9575 sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
9576 list_add_tail(&hpsa_sas_phy->phy_list_entry,
9577 &hpsa_sas_port->phy_list_head);
9578 hpsa_sas_phy->added_to_port = true;
9579
9580 return 0;
9581 }
9582
9583 static int
9584 hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
9585 struct sas_rphy *rphy)
9586 {
9587 struct sas_identify *identify;
9588
9589 identify = &rphy->identify;
9590 identify->sas_address = hpsa_sas_port->sas_address;
9591 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9592 identify->target_port_protocols = SAS_PROTOCOL_STP;
9593
9594 return sas_rphy_add(rphy);
9595 }
9596
9597 static struct hpsa_sas_port
9598 *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
9599 u64 sas_address)
9600 {
9601 int rc;
9602 struct hpsa_sas_port *hpsa_sas_port;
9603 struct sas_port *port;
9604
9605 hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
9606 if (!hpsa_sas_port)
9607 return NULL;
9608
9609 INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
9610 hpsa_sas_port->parent_node = hpsa_sas_node;
9611
9612 port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
9613 if (!port)
9614 goto free_hpsa_port;
9615
9616 rc = sas_port_add(port);
9617 if (rc)
9618 goto free_sas_port;
9619
9620 hpsa_sas_port->port = port;
9621 hpsa_sas_port->sas_address = sas_address;
9622 list_add_tail(&hpsa_sas_port->port_list_entry,
9623 &hpsa_sas_node->port_list_head);
9624
9625 return hpsa_sas_port;
9626
9627 free_sas_port:
9628 sas_port_free(port);
9629 free_hpsa_port:
9630 kfree(hpsa_sas_port);
9631
9632 return NULL;
9633 }
9634
9635 static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
9636 {
9637 struct hpsa_sas_phy *hpsa_sas_phy;
9638 struct hpsa_sas_phy *next;
9639
9640 list_for_each_entry_safe(hpsa_sas_phy, next,
9641 &hpsa_sas_port->phy_list_head, phy_list_entry)
9642 hpsa_free_sas_phy(hpsa_sas_phy);
9643
9644 sas_port_delete(hpsa_sas_port->port);
9645 list_del(&hpsa_sas_port->port_list_entry);
9646 kfree(hpsa_sas_port);
9647 }
9648
9649 static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
9650 {
9651 struct hpsa_sas_node *hpsa_sas_node;
9652
9653 hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
9654 if (hpsa_sas_node) {
9655 hpsa_sas_node->parent_dev = parent_dev;
9656 INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
9657 }
9658
9659 return hpsa_sas_node;
9660 }
9661
9662 static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
9663 {
9664 struct hpsa_sas_port *hpsa_sas_port;
9665 struct hpsa_sas_port *next;
9666
9667 if (!hpsa_sas_node)
9668 return;
9669
9670 list_for_each_entry_safe(hpsa_sas_port, next,
9671 &hpsa_sas_node->port_list_head, port_list_entry)
9672 hpsa_free_sas_port(hpsa_sas_port);
9673
9674 kfree(hpsa_sas_node);
9675 }
9676
9677 static struct hpsa_scsi_dev_t
9678 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
9679 struct sas_rphy *rphy)
9680 {
9681 int i;
9682 struct hpsa_scsi_dev_t *device;
9683
9684 for (i = 0; i < h->ndevices; i++) {
9685 device = h->dev[i];
9686 if (!device->sas_port)
9687 continue;
9688 if (device->sas_port->rphy == rphy)
9689 return device;
9690 }
9691
9692 return NULL;
9693 }
9694
9695 static int hpsa_add_sas_host(struct ctlr_info *h)
9696 {
9697 int rc;
9698 struct device *parent_dev;
9699 struct hpsa_sas_node *hpsa_sas_node;
9700 struct hpsa_sas_port *hpsa_sas_port;
9701 struct hpsa_sas_phy *hpsa_sas_phy;
9702
9703 parent_dev = &h->scsi_host->shost_dev;
9704
9705 hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9706 if (!hpsa_sas_node)
9707 return -ENOMEM;
9708
9709 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
9710 if (!hpsa_sas_port) {
9711 rc = -ENODEV;
9712 goto free_sas_node;
9713 }
9714
9715 hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
9716 if (!hpsa_sas_phy) {
9717 rc = -ENODEV;
9718 goto free_sas_port;
9719 }
9720
9721 rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
9722 if (rc)
9723 goto free_sas_phy;
9724
9725 h->sas_host = hpsa_sas_node;
9726
9727 return 0;
9728
9729 free_sas_phy:
9730 hpsa_free_sas_phy(hpsa_sas_phy);
9731 free_sas_port:
9732 hpsa_free_sas_port(hpsa_sas_port);
9733 free_sas_node:
9734 hpsa_free_sas_node(hpsa_sas_node);
9735
9736 return rc;
9737 }
9738
9739 static void hpsa_delete_sas_host(struct ctlr_info *h)
9740 {
9741 hpsa_free_sas_node(h->sas_host);
9742 }
9743
9744 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
9745 struct hpsa_scsi_dev_t *device)
9746 {
9747 int rc;
9748 struct hpsa_sas_port *hpsa_sas_port;
9749 struct sas_rphy *rphy;
9750
9751 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
9752 if (!hpsa_sas_port)
9753 return -ENOMEM;
9754
9755 rphy = sas_end_device_alloc(hpsa_sas_port->port);
9756 if (!rphy) {
9757 rc = -ENODEV;
9758 goto free_sas_port;
9759 }
9760
9761 hpsa_sas_port->rphy = rphy;
9762 device->sas_port = hpsa_sas_port;
9763
9764 rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
9765 if (rc)
9766 goto free_sas_port;
9767
9768 return 0;
9769
9770 free_sas_port:
9771 hpsa_free_sas_port(hpsa_sas_port);
9772 device->sas_port = NULL;
9773
9774 return rc;
9775 }
9776
9777 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
9778 {
9779 if (device->sas_port) {
9780 hpsa_free_sas_port(device->sas_port);
9781 device->sas_port = NULL;
9782 }
9783 }
9784
9785 static int
9786 hpsa_sas_get_linkerrors(struct sas_phy *phy)
9787 {
9788 return 0;
9789 }
9790
9791 static int
9792 hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
9793 {
9794 struct Scsi_Host *shost = phy_to_shost(rphy);
9795 struct ctlr_info *h;
9796 struct hpsa_scsi_dev_t *sd;
9797
9798 if (!shost)
9799 return -ENXIO;
9800
9801 h = shost_to_hba(shost);
9802
9803 if (!h)
9804 return -ENXIO;
9805
9806 sd = hpsa_find_device_by_sas_rphy(h, rphy);
9807 if (!sd)
9808 return -ENXIO;
9809
9810 *identifier = sd->eli;
9811
9812 return 0;
9813 }
9814
9815 static int
9816 hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
9817 {
9818 return -ENXIO;
9819 }
9820
9821 static int
9822 hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
9823 {
9824 return 0;
9825 }
9826
9827 static int
9828 hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
9829 {
9830 return 0;
9831 }
9832
9833 static int
9834 hpsa_sas_phy_setup(struct sas_phy *phy)
9835 {
9836 return 0;
9837 }
9838
9839 static void
9840 hpsa_sas_phy_release(struct sas_phy *phy)
9841 {
9842 }
9843
9844 static int
9845 hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
9846 {
9847 return -EINVAL;
9848 }
9849
9850 static struct sas_function_template hpsa_sas_transport_functions = {
9851 .get_linkerrors = hpsa_sas_get_linkerrors,
9852 .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
9853 .get_bay_identifier = hpsa_sas_get_bay_identifier,
9854 .phy_reset = hpsa_sas_phy_reset,
9855 .phy_enable = hpsa_sas_phy_enable,
9856 .phy_setup = hpsa_sas_phy_setup,
9857 .phy_release = hpsa_sas_phy_release,
9858 .set_phy_speed = hpsa_sas_phy_speed,
9859 };
9860
9861
9862
9863
9864
9865 static int __init hpsa_init(void)
9866 {
9867 int rc;
9868
9869 hpsa_sas_transport_template =
9870 sas_attach_transport(&hpsa_sas_transport_functions);
9871 if (!hpsa_sas_transport_template)
9872 return -ENODEV;
9873
9874 rc = pci_register_driver(&hpsa_pci_driver);
9875
9876 if (rc)
9877 sas_release_transport(hpsa_sas_transport_template);
9878
9879 return rc;
9880 }
9881
9882 static void __exit hpsa_cleanup(void)
9883 {
9884 pci_unregister_driver(&hpsa_pci_driver);
9885 sas_release_transport(hpsa_sas_transport_template);
9886 }
9887
9888 static void __attribute__((unused)) verify_offsets(void)
9889 {
9890 #define VERIFY_OFFSET(member, offset) \
9891 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
9892
9893 VERIFY_OFFSET(structure_size, 0);
9894 VERIFY_OFFSET(volume_blk_size, 4);
9895 VERIFY_OFFSET(volume_blk_cnt, 8);
9896 VERIFY_OFFSET(phys_blk_shift, 16);
9897 VERIFY_OFFSET(parity_rotation_shift, 17);
9898 VERIFY_OFFSET(strip_size, 18);
9899 VERIFY_OFFSET(disk_starting_blk, 20);
9900 VERIFY_OFFSET(disk_blk_cnt, 28);
9901 VERIFY_OFFSET(data_disks_per_row, 36);
9902 VERIFY_OFFSET(metadata_disks_per_row, 38);
9903 VERIFY_OFFSET(row_cnt, 40);
9904 VERIFY_OFFSET(layout_map_count, 42);
9905 VERIFY_OFFSET(flags, 44);
9906 VERIFY_OFFSET(dekindex, 46);
9907
9908 VERIFY_OFFSET(data, 64);
9909
9910 #undef VERIFY_OFFSET
9911
9912 #define VERIFY_OFFSET(member, offset) \
9913 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
9914
9915 VERIFY_OFFSET(IU_type, 0);
9916 VERIFY_OFFSET(direction, 1);
9917 VERIFY_OFFSET(reply_queue, 2);
9918
9919 VERIFY_OFFSET(scsi_nexus, 4);
9920 VERIFY_OFFSET(Tag, 8);
9921 VERIFY_OFFSET(cdb, 16);
9922 VERIFY_OFFSET(cciss_lun, 32);
9923 VERIFY_OFFSET(data_len, 40);
9924 VERIFY_OFFSET(cmd_priority_task_attr, 44);
9925 VERIFY_OFFSET(sg_count, 45);
9926
9927 VERIFY_OFFSET(err_ptr, 48);
9928 VERIFY_OFFSET(err_len, 56);
9929
9930 VERIFY_OFFSET(sg, 64);
9931
9932 #undef VERIFY_OFFSET
9933
9934 #define VERIFY_OFFSET(member, offset) \
9935 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
9936
9937 VERIFY_OFFSET(dev_handle, 0x00);
9938 VERIFY_OFFSET(reserved1, 0x02);
9939 VERIFY_OFFSET(function, 0x03);
9940 VERIFY_OFFSET(reserved2, 0x04);
9941 VERIFY_OFFSET(err_info, 0x0C);
9942 VERIFY_OFFSET(reserved3, 0x10);
9943 VERIFY_OFFSET(err_info_len, 0x12);
9944 VERIFY_OFFSET(reserved4, 0x13);
9945 VERIFY_OFFSET(sgl_offset, 0x14);
9946 VERIFY_OFFSET(reserved5, 0x15);
9947 VERIFY_OFFSET(transfer_len, 0x1C);
9948 VERIFY_OFFSET(reserved6, 0x20);
9949 VERIFY_OFFSET(io_flags, 0x24);
9950 VERIFY_OFFSET(reserved7, 0x26);
9951 VERIFY_OFFSET(LUN, 0x34);
9952 VERIFY_OFFSET(control, 0x3C);
9953 VERIFY_OFFSET(CDB, 0x40);
9954 VERIFY_OFFSET(reserved8, 0x50);
9955 VERIFY_OFFSET(host_context_flags, 0x60);
9956 VERIFY_OFFSET(timeout_sec, 0x62);
9957 VERIFY_OFFSET(ReplyQueue, 0x64);
9958 VERIFY_OFFSET(reserved9, 0x65);
9959 VERIFY_OFFSET(tag, 0x68);
9960 VERIFY_OFFSET(host_addr, 0x70);
9961 VERIFY_OFFSET(CISS_LUN, 0x78);
9962 VERIFY_OFFSET(SG, 0x78 + 8);
9963 #undef VERIFY_OFFSET
9964 }
9965
9966 module_init(hpsa_init);
9967 module_exit(hpsa_cleanup);