This source file includes the following definitions:
- ips_setup
- ips_detect
- ips_setup_funclist
- ips_release
- ips_halt
- ips_eh_abort
- __ips_eh_reset
- ips_eh_reset
- ips_queue_lck
- DEF_SCSI_QCMD
- ips_slave_configure
- do_ipsintr
- ips_intr_copperhead
- ips_intr_morpheus
- ips_info
- ips_write_info
- ips_show_info
- ips_is_passthru
- ips_alloc_passthru_buffer
- ips_make_passthru
- ips_flash_copperhead
- ips_flash_bios
- ips_fill_scb_sg_single
- ips_flash_firmware
- ips_free_flash_copperhead
- ips_usrcmd
- ips_cleanup_passthru
- ips_host_info
- ips_identify_controller
- ips_get_bios_version
- ips_hainit
- ips_next
- ips_putq_scb_head
- ips_removeq_scb_head
- ips_removeq_scb
- ips_putq_wait_tail
- ips_removeq_wait_head
- ips_removeq_wait
- ips_putq_copp_tail
- ips_removeq_copp_head
- ips_removeq_copp
- ipsintr_blocking
- ipsintr_done
- ips_done
- ips_map_status
- ips_send_wait
- ips_scmd_buf_write
- ips_scmd_buf_read
- ips_send_cmd
- ips_chkstatus
- ips_online
- ips_inquiry
- ips_rdcap
- ips_msense
- ips_reqsen
- ips_free
- ips_deallocatescbs
- ips_allocatescbs
- ips_init_scb
- ips_getscb
- ips_freescb
- ips_isinit_copperhead
- ips_isinit_copperhead_memio
- ips_isinit_morpheus
- ips_flush_and_reset
- ips_poll_for_flush_complete
- ips_enable_int_copperhead
- ips_enable_int_copperhead_memio
- ips_enable_int_morpheus
- ips_init_copperhead
- ips_init_copperhead_memio
- ips_init_morpheus
- ips_reset_copperhead
- ips_reset_copperhead_memio
- ips_reset_morpheus
- ips_statinit
- ips_statinit_memio
- ips_statupd_copperhead
- ips_statupd_copperhead_memio
- ips_statupd_morpheus
- ips_issue_copperhead
- ips_issue_copperhead_memio
- ips_issue_i2o
- ips_issue_i2o_memio
- ips_isintr_copperhead
- ips_isintr_copperhead_memio
- ips_isintr_morpheus
- ips_wait
- ips_write_driver_status
- ips_read_adapter_status
- ips_read_subsystem_parameters
- ips_read_config
- ips_readwrite_page5
- ips_clear_adapter
- ips_ffdc_reset
- ips_ffdc_time
- ips_fix_ffdc_time
- ips_erase_bios
- ips_erase_bios_memio
- ips_program_bios
- ips_program_bios_memio
- ips_verify_bios
- ips_verify_bios_memio
- ips_abort_init
- ips_shift_controllers
- ips_order_controllers
- ips_register_scsi
- ips_remove_device
- ips_module_init
- ips_module_exit
- ips_insert_device
- ips_init_phase1
- ips_init_phase2
164 #include <asm/io.h>
165 #include <asm/byteorder.h>
166 #include <asm/page.h>
167 #include <linux/stddef.h>
168 #include <linux/string.h>
169 #include <linux/errno.h>
170 #include <linux/kernel.h>
171 #include <linux/ioport.h>
172 #include <linux/slab.h>
173 #include <linux/delay.h>
174 #include <linux/pci.h>
175 #include <linux/proc_fs.h>
176 #include <linux/reboot.h>
177 #include <linux/interrupt.h>
178
179 #include <linux/blkdev.h>
180 #include <linux/types.h>
181 #include <linux/dma-mapping.h>
182
183 #include <scsi/sg.h>
184 #include "scsi.h"
185 #include <scsi/scsi_host.h>
186
187 #include "ips.h"
188
189 #include <linux/module.h>
190
191 #include <linux/stat.h>
192
193 #include <linux/spinlock.h>
194 #include <linux/init.h>
195
196 #include <linux/smp.h>
197
198 #ifdef MODULE
199 static char *ips = NULL;
200 module_param(ips, charp, 0);
201 #endif
202
203
204
205
206 #define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING
207 #define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " "
208
209 #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
210 DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
211 DMA_BIDIRECTIONAL : \
212 scb->scsi_cmd->sc_data_direction)
213
214 #ifdef IPS_DEBUG
215 #define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n");
216 #define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n");
217 #define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v);
218 #else
219 #define METHOD_TRACE(s, i)
220 #define DEBUG(i, s)
221 #define DEBUG_VAR(i, s, v...)
222 #endif
223
224
225
226
227 static int ips_eh_abort(struct scsi_cmnd *);
228 static int ips_eh_reset(struct scsi_cmnd *);
229 static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *);
230 static const char *ips_info(struct Scsi_Host *);
231 static irqreturn_t do_ipsintr(int, void *);
232 static int ips_hainit(ips_ha_t *);
233 static int ips_map_status(ips_ha_t *, ips_scb_t *, ips_stat_t *);
234 static int ips_send_wait(ips_ha_t *, ips_scb_t *, int, int);
235 static int ips_send_cmd(ips_ha_t *, ips_scb_t *);
236 static int ips_online(ips_ha_t *, ips_scb_t *);
237 static int ips_inquiry(ips_ha_t *, ips_scb_t *);
238 static int ips_rdcap(ips_ha_t *, ips_scb_t *);
239 static int ips_msense(ips_ha_t *, ips_scb_t *);
240 static int ips_reqsen(ips_ha_t *, ips_scb_t *);
241 static int ips_deallocatescbs(ips_ha_t *, int);
242 static int ips_allocatescbs(ips_ha_t *);
243 static int ips_reset_copperhead(ips_ha_t *);
244 static int ips_reset_copperhead_memio(ips_ha_t *);
245 static int ips_reset_morpheus(ips_ha_t *);
246 static int ips_issue_copperhead(ips_ha_t *, ips_scb_t *);
247 static int ips_issue_copperhead_memio(ips_ha_t *, ips_scb_t *);
248 static int ips_issue_i2o(ips_ha_t *, ips_scb_t *);
249 static int ips_issue_i2o_memio(ips_ha_t *, ips_scb_t *);
250 static int ips_isintr_copperhead(ips_ha_t *);
251 static int ips_isintr_copperhead_memio(ips_ha_t *);
252 static int ips_isintr_morpheus(ips_ha_t *);
253 static int ips_wait(ips_ha_t *, int, int);
254 static int ips_write_driver_status(ips_ha_t *, int);
255 static int ips_read_adapter_status(ips_ha_t *, int);
256 static int ips_read_subsystem_parameters(ips_ha_t *, int);
257 static int ips_read_config(ips_ha_t *, int);
258 static int ips_clear_adapter(ips_ha_t *, int);
259 static int ips_readwrite_page5(ips_ha_t *, int, int);
260 static int ips_init_copperhead(ips_ha_t *);
261 static int ips_init_copperhead_memio(ips_ha_t *);
262 static int ips_init_morpheus(ips_ha_t *);
263 static int ips_isinit_copperhead(ips_ha_t *);
264 static int ips_isinit_copperhead_memio(ips_ha_t *);
265 static int ips_isinit_morpheus(ips_ha_t *);
266 static int ips_erase_bios(ips_ha_t *);
267 static int ips_program_bios(ips_ha_t *, char *, uint32_t, uint32_t);
268 static int ips_verify_bios(ips_ha_t *, char *, uint32_t, uint32_t);
269 static int ips_erase_bios_memio(ips_ha_t *);
270 static int ips_program_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
271 static int ips_verify_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
272 static int ips_flash_copperhead(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
273 static int ips_flash_bios(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
274 static int ips_flash_firmware(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
275 static void ips_free_flash_copperhead(ips_ha_t * ha);
276 static void ips_get_bios_version(ips_ha_t *, int);
277 static void ips_identify_controller(ips_ha_t *);
278 static void ips_chkstatus(ips_ha_t *, IPS_STATUS *);
279 static void ips_enable_int_copperhead(ips_ha_t *);
280 static void ips_enable_int_copperhead_memio(ips_ha_t *);
281 static void ips_enable_int_morpheus(ips_ha_t *);
282 static int ips_intr_copperhead(ips_ha_t *);
283 static int ips_intr_morpheus(ips_ha_t *);
284 static void ips_next(ips_ha_t *, int);
285 static void ipsintr_blocking(ips_ha_t *, struct ips_scb *);
286 static void ipsintr_done(ips_ha_t *, struct ips_scb *);
287 static void ips_done(ips_ha_t *, ips_scb_t *);
288 static void ips_free(ips_ha_t *);
289 static void ips_init_scb(ips_ha_t *, ips_scb_t *);
290 static void ips_freescb(ips_ha_t *, ips_scb_t *);
291 static void ips_setup_funclist(ips_ha_t *);
292 static void ips_statinit(ips_ha_t *);
293 static void ips_statinit_memio(ips_ha_t *);
294 static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time64_t);
295 static void ips_ffdc_reset(ips_ha_t *, int);
296 static void ips_ffdc_time(ips_ha_t *);
297 static uint32_t ips_statupd_copperhead(ips_ha_t *);
298 static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
299 static uint32_t ips_statupd_morpheus(ips_ha_t *);
300 static ips_scb_t *ips_getscb(ips_ha_t *);
301 static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
302 static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *);
303 static void ips_putq_copp_tail(ips_copp_queue_t *,
304 ips_copp_wait_item_t *);
305 static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
306 static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
307 static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *);
308 static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *,
309 struct scsi_cmnd *);
310 static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
311 ips_copp_wait_item_t *);
312 static ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *);
313
314 static int ips_is_passthru(struct scsi_cmnd *);
315 static int ips_make_passthru(ips_ha_t *, struct scsi_cmnd *, ips_scb_t *, int);
316 static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
317 static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *);
318 static void ips_scmd_buf_write(struct scsi_cmnd * scmd, void *data,
319 unsigned int count);
320 static void ips_scmd_buf_read(struct scsi_cmnd * scmd, void *data,
321 unsigned int count);
322
323 static int ips_write_info(struct Scsi_Host *, char *, int);
324 static int ips_show_info(struct seq_file *, struct Scsi_Host *);
325 static int ips_host_info(ips_ha_t *, struct seq_file *);
326 static int ips_abort_init(ips_ha_t * ha, int index);
327 static int ips_init_phase2(int index);
328
329 static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr);
330 static int ips_register_scsi(int index);
331
332 static int ips_poll_for_flush_complete(ips_ha_t * ha);
333 static void ips_flush_and_reset(ips_ha_t *ha);
334
335
336
337
338 static const char ips_name[] = "ips";
339 static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS];
340 static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS];
341 static unsigned int ips_next_controller;
342 static unsigned int ips_num_controllers;
343 static unsigned int ips_released_controllers;
344 static int ips_hotplug;
345 static int ips_cmd_timeout = 60;
346 static int ips_reset_timeout = 60 * 5;
347 static int ips_force_memio = 1;
348 static int ips_force_i2o = 1;
349 static int ips_ioctlsize = IPS_IOCTL_SIZE;
350 static int ips_cd_boot;
351 static char *ips_FlashData = NULL;
352 static dma_addr_t ips_flashbusaddr;
353 static long ips_FlashDataInUse;
354 static uint32_t MaxLiteCmds = 32;
355 static struct scsi_host_template ips_driver_template = {
356 .info = ips_info,
357 .queuecommand = ips_queue,
358 .eh_abort_handler = ips_eh_abort,
359 .eh_host_reset_handler = ips_eh_reset,
360 .proc_name = "ips",
361 .show_info = ips_show_info,
362 .write_info = ips_write_info,
363 .slave_configure = ips_slave_configure,
364 .bios_param = ips_biosparam,
365 .this_id = -1,
366 .sg_tablesize = IPS_MAX_SG,
367 .cmd_per_lun = 3,
368 .no_write_same = 1,
369 };
370
371
372
373 static struct pci_device_id ips_pci_table[] = {
374 { 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
375 { 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
376 { 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
377 { 0, }
378 };
379
380 MODULE_DEVICE_TABLE( pci, ips_pci_table );
381
382 static char ips_hot_plug_name[] = "ips";
383
384 static int ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
385 static void ips_remove_device(struct pci_dev *pci_dev);
386
387 static struct pci_driver ips_pci_driver = {
388 .name = ips_hot_plug_name,
389 .id_table = ips_pci_table,
390 .probe = ips_insert_device,
391 .remove = ips_remove_device,
392 };
393
394
395
396
397
398 static int ips_halt(struct notifier_block *nb, ulong event, void *buf);
399
400 #define MAX_ADAPTER_NAME 15
401
402 static char ips_adapter_name[][30] = {
403 "ServeRAID",
404 "ServeRAID II",
405 "ServeRAID on motherboard",
406 "ServeRAID on motherboard",
407 "ServeRAID 3H",
408 "ServeRAID 3L",
409 "ServeRAID 4H",
410 "ServeRAID 4M",
411 "ServeRAID 4L",
412 "ServeRAID 4Mx",
413 "ServeRAID 4Lx",
414 "ServeRAID 5i",
415 "ServeRAID 5i",
416 "ServeRAID 6M",
417 "ServeRAID 6i",
418 "ServeRAID 7t",
419 "ServeRAID 7k",
420 "ServeRAID 7M"
421 };
422
423 static struct notifier_block ips_notifier = {
424 ips_halt, NULL, 0
425 };
426
427
428
429
430 static char ips_command_direction[] = {
431 IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT,
432 IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK,
433 IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
434 IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
435 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT,
436 IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
437 IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN,
438 IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
439 IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK,
440 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
441 IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE,
442 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
443 IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT,
444 IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE,
445 IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK,
446 IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
447 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
448 IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
449 IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
450 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
451 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
452 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
453 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
454 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
455 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
456 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
457 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
458 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
459 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
460 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
461 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
462 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
463 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
464 IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE,
465 IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT,
466 IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE,
467 IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN,
468 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
469 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
470 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
471 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
472 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
473 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
474 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
475 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
476 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
477 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT,
478 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
479 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
480 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
481 IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK
482 };
483
484
485
486
487
488
489
490
491
492
493
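/*
 * ips_setup - parse the "ips=" boot/module option string.
 * Options are separated by ',' or '.'; a value, if any, follows ':'
 * (for example "ips=noi2o,nommap,ioctlsize:16384" -- values illustrative only).
 */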
494 static int
495 ips_setup(char *ips_str)
496 {
497
498 int i;
499 char *key;
500 char *value;
501 IPS_OPTION options[] = {
502 {"noi2o", &ips_force_i2o, 0},
503 {"nommap", &ips_force_memio, 0},
504 {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE},
505 {"cdboot", &ips_cd_boot, 0},
506 {"maxcmds", &MaxLiteCmds, 32},
507 };
508
509
510
511 while ((key = strsep(&ips_str, ",."))) {
512 if (!*key)
513 continue;
514 value = strchr(key, ':');
515 if (value)
516 *value++ = '\0';
517
518
519
520
521 for (i = 0; i < ARRAY_SIZE(options); i++) {
522 if (strncasecmp
523 (key, options[i].option_name,
524 strlen(options[i].option_name)) == 0) {
525 if (value)
526 *options[i].option_flag =
527 simple_strtoul(value, NULL, 0);
528 else
529 *options[i].option_flag =
530 options[i].option_value;
531 break;
532 }
533 }
534 }
535
536 return (1);
537 }
538
539 __setup("ips=", ips_setup);
540
541
542
543
544
545
546
547
548
549
550
551
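/*
 * ips_detect - register every controller found during PCI probe with the
 * SCSI mid-layer, then mark the driver hot-plug capable.
 */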
552 static int
553 ips_detect(struct scsi_host_template * SHT)
554 {
555 int i;
556
557 METHOD_TRACE("ips_detect", 1);
558
559 #ifdef MODULE
560 if (ips)
561 ips_setup(ips);
562 #endif
563
564 for (i = 0; i < ips_num_controllers; i++) {
565 if (ips_register_scsi(i))
566 ips_free(ips_ha[i]);
567 ips_released_controllers++;
568 }
569 ips_hotplug = 1;
570 return (ips_num_controllers);
571 }
572
573
574
575
576
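/*
 * ips_setup_funclist - fill in the ha->func dispatch table by adapter
 * family: Morpheus/Marco use memory-mapped I2O delivery, Copperhead uses
 * either memory-mapped or port I/O.
 */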
577 static void
578 ips_setup_funclist(ips_ha_t * ha)
579 {
580
581
582
583
584 if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) {
585
586 ha->func.isintr = ips_isintr_morpheus;
587 ha->func.isinit = ips_isinit_morpheus;
588 ha->func.issue = ips_issue_i2o_memio;
589 ha->func.init = ips_init_morpheus;
590 ha->func.statupd = ips_statupd_morpheus;
591 ha->func.reset = ips_reset_morpheus;
592 ha->func.intr = ips_intr_morpheus;
593 ha->func.enableint = ips_enable_int_morpheus;
594 } else if (IPS_USE_MEMIO(ha)) {
595
596 ha->func.isintr = ips_isintr_copperhead_memio;
597 ha->func.isinit = ips_isinit_copperhead_memio;
598 ha->func.init = ips_init_copperhead_memio;
599 ha->func.statupd = ips_statupd_copperhead_memio;
600 ha->func.statinit = ips_statinit_memio;
601 ha->func.reset = ips_reset_copperhead_memio;
602 ha->func.intr = ips_intr_copperhead;
603 ha->func.erasebios = ips_erase_bios_memio;
604 ha->func.programbios = ips_program_bios_memio;
605 ha->func.verifybios = ips_verify_bios_memio;
606 ha->func.enableint = ips_enable_int_copperhead_memio;
607 if (IPS_USE_I2O_DELIVER(ha))
608 ha->func.issue = ips_issue_i2o_memio;
609 else
610 ha->func.issue = ips_issue_copperhead_memio;
611 } else {
612
613 ha->func.isintr = ips_isintr_copperhead;
614 ha->func.isinit = ips_isinit_copperhead;
615 ha->func.init = ips_init_copperhead;
616 ha->func.statupd = ips_statupd_copperhead;
617 ha->func.statinit = ips_statinit;
618 ha->func.reset = ips_reset_copperhead;
619 ha->func.intr = ips_intr_copperhead;
620 ha->func.erasebios = ips_erase_bios;
621 ha->func.programbios = ips_program_bios;
622 ha->func.verifybios = ips_verify_bios;
623 ha->func.enableint = ips_enable_int_copperhead;
624
625 if (IPS_USE_I2O_DELIVER(ha))
626 ha->func.issue = ips_issue_i2o;
627 else
628 ha->func.issue = ips_issue_copperhead;
629 }
630 }
631
632
633
634
635
636
637
638
639
640
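/*
 * ips_release - remove the SCSI host, flush the adapter cache with a
 * final IPS_CMD_FLUSH, then free the IRQ and per-adapter resources.
 */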
641 static int
642 ips_release(struct Scsi_Host *sh)
643 {
644 ips_scb_t *scb;
645 ips_ha_t *ha;
646 int i;
647
648 METHOD_TRACE("ips_release", 1);
649
650 scsi_remove_host(sh);
651
652 for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++) ;
653
654 if (i == IPS_MAX_ADAPTERS) {
655 printk(KERN_WARNING
656 "(%s) release, invalid Scsi_Host pointer.\n", ips_name);
657 BUG();
658 return (FALSE);
659 }
660
661 ha = IPS_HA(sh);
662
663 if (!ha)
664 return (FALSE);
665
666
667 scb = &ha->scbs[ha->max_cmds - 1];
668
669 ips_init_scb(ha, scb);
670
671 scb->timeout = ips_cmd_timeout;
672 scb->cdb[0] = IPS_CMD_FLUSH;
673
674 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
675 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
676 scb->cmd.flush_cache.state = IPS_NORM_STATE;
677 scb->cmd.flush_cache.reserved = 0;
678 scb->cmd.flush_cache.reserved2 = 0;
679 scb->cmd.flush_cache.reserved3 = 0;
680 scb->cmd.flush_cache.reserved4 = 0;
681
682 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");
683
684
685 if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE)
686 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Incomplete Flush.\n");
687
688 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Complete.\n");
689
690 ips_sh[i] = NULL;
691 ips_ha[i] = NULL;
692
693
694 ips_free(ha);
695
696
697 free_irq(ha->pcidev->irq, ha);
698
699 scsi_host_put(sh);
700
701 ips_released_controllers++;
702
703 return (FALSE);
704 }
705
706
707
708
709
710
711
712
713
714
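/*
 * ips_halt - reboot notifier; flushes the cache of every active adapter
 * on SYS_RESTART, SYS_HALT and SYS_POWER_OFF.
 */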
715 static int
716 ips_halt(struct notifier_block *nb, ulong event, void *buf)
717 {
718 ips_scb_t *scb;
719 ips_ha_t *ha;
720 int i;
721
722 if ((event != SYS_RESTART) && (event != SYS_HALT) &&
723 (event != SYS_POWER_OFF))
724 return (NOTIFY_DONE);
725
726 for (i = 0; i < ips_next_controller; i++) {
727 ha = (ips_ha_t *) ips_ha[i];
728
729 if (!ha)
730 continue;
731
732 if (!ha->active)
733 continue;
734
735
736 scb = &ha->scbs[ha->max_cmds - 1];
737
738 ips_init_scb(ha, scb);
739
740 scb->timeout = ips_cmd_timeout;
741 scb->cdb[0] = IPS_CMD_FLUSH;
742
743 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
744 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
745 scb->cmd.flush_cache.state = IPS_NORM_STATE;
746 scb->cmd.flush_cache.reserved = 0;
747 scb->cmd.flush_cache.reserved2 = 0;
748 scb->cmd.flush_cache.reserved3 = 0;
749 scb->cmd.flush_cache.reserved4 = 0;
750
751 IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");
752
753
754 if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) ==
755 IPS_FAILURE)
756 IPS_PRINTK(KERN_WARNING, ha->pcidev,
757 "Incomplete Flush.\n");
758 else
759 IPS_PRINTK(KERN_WARNING, ha->pcidev,
760 "Flushing Complete.\n");
761 }
762
763 return (NOTIFY_OK);
764 }
765
766
767
768
769
770
771
772
773
774
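/*
 * ips_eh_abort - abort a command that is still on the passthru or SCB
 * wait queue; commands already active on the adapter cannot be aborted
 * and FAILED is returned.
 */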
775 int ips_eh_abort(struct scsi_cmnd *SC)
776 {
777 ips_ha_t *ha;
778 ips_copp_wait_item_t *item;
779 int ret;
780 struct Scsi_Host *host;
781
782 METHOD_TRACE("ips_eh_abort", 1);
783
784 if (!SC)
785 return (FAILED);
786
787 host = SC->device->host;
788 ha = (ips_ha_t *) SC->device->host->hostdata;
789
790 if (!ha)
791 return (FAILED);
792
793 if (!ha->active)
794 return (FAILED);
795
796 spin_lock(host->host_lock);
797
798
799 item = ha->copp_waitlist.head;
800 while ((item) && (item->scsi_cmd != SC))
801 item = item->next;
802
803 if (item) {
804
805 ips_removeq_copp(&ha->copp_waitlist, item);
806 ret = (SUCCESS);
807
808
809 } else if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
810
811 ret = (SUCCESS);
812 } else {
813
814 ret = (FAILED);
815 }
816
817 spin_unlock(host->host_lock);
818 return ret;
819 }
820
821
822
823
824
825
826
827
828
829
830
831
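/*
 * __ips_eh_reset - host reset handler, called with the host lock held.
 * Commands still waiting are simply dequeued; otherwise a cache flush is
 * attempted first, and only if that fails is the adapter physically
 * reset and every outstanding command completed with DID_RESET/DID_ERROR.
 */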
832 static int __ips_eh_reset(struct scsi_cmnd *SC)
833 {
834 int ret;
835 int i;
836 ips_ha_t *ha;
837 ips_scb_t *scb;
838 ips_copp_wait_item_t *item;
839
840 METHOD_TRACE("ips_eh_reset", 1);
841
842 #ifdef NO_IPS_RESET
843 return (FAILED);
844 #else
845
846 if (!SC) {
847 DEBUG(1, "Reset called with NULL scsi command");
848
849 return (FAILED);
850 }
851
852 ha = (ips_ha_t *) SC->device->host->hostdata;
853
854 if (!ha) {
855 DEBUG(1, "Reset called with NULL ha struct");
856
857 return (FAILED);
858 }
859
860 if (!ha->active)
861 return (FAILED);
862
863
864 item = ha->copp_waitlist.head;
865 while ((item) && (item->scsi_cmd != SC))
866 item = item->next;
867
868 if (item) {
869
870 ips_removeq_copp(&ha->copp_waitlist, item);
871 return (SUCCESS);
872 }
873
874
875 if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
876
877 return (SUCCESS);
878 }
879
880
881
882
883
884
885
886
887
888
889
890 if (ha->ioctl_reset == 0) {
891 scb = &ha->scbs[ha->max_cmds - 1];
892
893 ips_init_scb(ha, scb);
894
895 scb->timeout = ips_cmd_timeout;
896 scb->cdb[0] = IPS_CMD_FLUSH;
897
898 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
899 scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
900 scb->cmd.flush_cache.state = IPS_NORM_STATE;
901 scb->cmd.flush_cache.reserved = 0;
902 scb->cmd.flush_cache.reserved2 = 0;
903 scb->cmd.flush_cache.reserved3 = 0;
904 scb->cmd.flush_cache.reserved4 = 0;
905
906
907 ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL);
908 if (ret == IPS_SUCCESS) {
909 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
910 "Reset Request - Flushed Cache\n");
911 return (SUCCESS);
912 }
913 }
914
915
916
917
918 ha->ioctl_reset = 0;
919
920
921
922
923
924 IPS_PRINTK(KERN_NOTICE, ha->pcidev, "Resetting controller.\n");
925 ret = (*ha->func.reset) (ha);
926
927 if (!ret) {
928 struct scsi_cmnd *scsi_cmd;
929
930 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
931 "Controller reset failed - controller now offline.\n");
932
933
934 DEBUG_VAR(1, "(%s%d) Failing active commands",
935 ips_name, ha->host_num);
936
937 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
938 scb->scsi_cmd->result = DID_ERROR << 16;
939 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
940 ips_freescb(ha, scb);
941 }
942
943
944 DEBUG_VAR(1, "(%s%d) Failing pending commands",
945 ips_name, ha->host_num);
946
947 while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
948 scsi_cmd->result = DID_ERROR << 16;
949 scsi_cmd->scsi_done(scsi_cmd);
950 }
951
952 ha->active = FALSE;
953 return (FAILED);
954 }
955
956 if (!ips_clear_adapter(ha, IPS_INTR_IORL)) {
957 struct scsi_cmnd *scsi_cmd;
958
959 IPS_PRINTK(KERN_NOTICE, ha->pcidev,
960 "Controller reset failed - controller now offline.\n");
961
962
963 DEBUG_VAR(1, "(%s%d) Failing active commands",
964 ips_name, ha->host_num);
965
966 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
967 scb->scsi_cmd->result = DID_ERROR << 16;
968 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
969 ips_freescb(ha, scb);
970 }
971
972
973 DEBUG_VAR(1, "(%s%d) Failing pending commands",
974 ips_name, ha->host_num);
975
976 while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
977 scsi_cmd->result = DID_ERROR << 16;
978 scsi_cmd->scsi_done(scsi_cmd);
979 }
980
981 ha->active = FALSE;
982 return (FAILED);
983 }
984
985
986 if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) {
987 ha->last_ffdc = ktime_get_real_seconds();
988 ha->reset_count++;
989 ips_ffdc_reset(ha, IPS_INTR_IORL);
990 }
991
992
993 DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num);
994
995 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
996 scb->scsi_cmd->result = DID_RESET << 16;
997 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
998 ips_freescb(ha, scb);
999 }
1000
1001
1002 for (i = 1; i < ha->nbus; i++)
1003 ha->dcdb_active[i - 1] = 0;
1004
1005
1006 ha->num_ioctl = 0;
1007
1008 ips_next(ha, IPS_INTR_IORL);
1009
1010 return (SUCCESS);
1011 #endif
1012
1013 }
1014
1015 static int ips_eh_reset(struct scsi_cmnd *SC)
1016 {
1017 int rc;
1018
1019 spin_lock_irq(SC->device->host->host_lock);
1020 rc = __ips_eh_reset(SC);
1021 spin_unlock_irq(SC->device->host->host_lock);
1022
1023 return rc;
1024 }
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
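/*
 * ips_queue_lck - queuecommand entry point (wrapped by DEF_SCSI_QCMD).
 * Passthru ("COPP") requests go on the ioctl wait queue, everything else
 * on the SCB wait queue; ips_next() then feeds the adapter.
 */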
1038 static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
1039 {
1040 ips_ha_t *ha;
1041 ips_passthru_t *pt;
1042
1043 METHOD_TRACE("ips_queue", 1);
1044
1045 ha = (ips_ha_t *) SC->device->host->hostdata;
1046
1047 if (!ha)
1048 return (1);
1049
1050 if (!ha->active)
1051 return (DID_ERROR);
1052
1053 if (ips_is_passthru(SC)) {
1054 if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) {
1055 SC->result = DID_BUS_BUSY << 16;
1056 done(SC);
1057
1058 return (0);
1059 }
1060 } else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) {
1061 SC->result = DID_BUS_BUSY << 16;
1062 done(SC);
1063
1064 return (0);
1065 }
1066
1067 SC->scsi_done = done;
1068
1069 DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)",
1070 ips_name,
1071 ha->host_num,
1072 SC->cmnd[0],
1073 SC->device->channel, SC->device->id, SC->device->lun);
1074
1075
1076 if ((scmd_channel(SC) > 0)
1077 && (scmd_id(SC) == ha->ha_id[scmd_channel(SC)])) {
1078 SC->result = DID_NO_CONNECT << 16;
1079 done(SC);
1080
1081 return (0);
1082 }
1083
1084 if (ips_is_passthru(SC)) {
1085
1086 ips_copp_wait_item_t *scratch;
1087
1088
1089
1090
1091 pt = (ips_passthru_t *) scsi_sglist(SC);
1092 if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
1093 (pt->CoppCP.cmd.reset.adapter_flag == 1)) {
1094 if (ha->scb_activelist.count != 0) {
1095 SC->result = DID_BUS_BUSY << 16;
1096 done(SC);
1097 return (0);
1098 }
1099 ha->ioctl_reset = 1;
1100 __ips_eh_reset(SC);
1101 SC->result = DID_OK << 16;
1102 SC->scsi_done(SC);
1103 return (0);
1104 }
1105
1106
1107 scratch = kmalloc(sizeof (ips_copp_wait_item_t), GFP_ATOMIC);
1108
1109 if (!scratch) {
1110 SC->result = DID_ERROR << 16;
1111 done(SC);
1112
1113 return (0);
1114 }
1115
1116 scratch->scsi_cmd = SC;
1117 scratch->next = NULL;
1118
1119 ips_putq_copp_tail(&ha->copp_waitlist, scratch);
1120 } else {
1121 ips_putq_wait_tail(&ha->scb_waitlist, SC);
1122 }
1123
1124 ips_next(ha, IPS_INTR_IORL);
1125
1126 return (0);
1127 }
1128
1129 static DEF_SCSI_QCMD(ips_queue)
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140 static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1141 sector_t capacity, int geom[])
1142 {
1143 ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
1144 int heads;
1145 int sectors;
1146 int cylinders;
1147
1148 METHOD_TRACE("ips_biosparam", 1);
1149
1150 if (!ha)
1151
1152 return (0);
1153
1154 if (!ha->active)
1155 return (0);
1156
1157 if (!ips_read_adapter_status(ha, IPS_INTR_ON))
1158
1159 return (0);
1160
1161 if ((capacity > 0x400000) && ((ha->enq->ucMiscFlag & 0x8) == 0)) {
1162 heads = IPS_NORM_HEADS;
1163 sectors = IPS_NORM_SECTORS;
1164 } else {
1165 heads = IPS_COMP_HEADS;
1166 sectors = IPS_COMP_SECTORS;
1167 }
1168
1169 cylinders = (unsigned long) capacity / (heads * sectors);
1170
1171 DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d",
1172 heads, sectors, cylinders);
1173
1174 geom[0] = heads;
1175 geom[1] = sectors;
1176 geom[2] = cylinders;
1177
1178 return (0);
1179 }
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190 static int
1191 ips_slave_configure(struct scsi_device * SDptr)
1192 {
1193 ips_ha_t *ha;
1194 int min;
1195
1196 ha = IPS_HA(SDptr->host);
1197 if (SDptr->tagged_supported && SDptr->type == TYPE_DISK) {
1198 min = ha->max_cmds / 2;
1199 if (ha->enq->ucLogDriveCount <= 2)
1200 min = ha->max_cmds - 1;
1201 scsi_change_queue_depth(SDptr, min);
1202 }
1203
1204 SDptr->skip_ms_page_8 = 1;
1205 SDptr->skip_ms_page_3f = 1;
1206 return 0;
1207 }
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
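/*
 * do_ipsintr - interrupt handler; runs the adapter-specific interrupt
 * routine under the host lock, then kicks ips_next() to start more work.
 */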
1218 static irqreturn_t
1219 do_ipsintr(int irq, void *dev_id)
1220 {
1221 ips_ha_t *ha;
1222 struct Scsi_Host *host;
1223 int irqstatus;
1224
1225 METHOD_TRACE("do_ipsintr", 2);
1226
1227 ha = (ips_ha_t *) dev_id;
1228 if (!ha)
1229 return IRQ_NONE;
1230 host = ips_sh[ha->host_num];
1231
1232 if (!host) {
1233 (*ha->func.intr) (ha);
1234 return IRQ_HANDLED;
1235 }
1236
1237 spin_lock(host->host_lock);
1238
1239 if (!ha->active) {
1240 spin_unlock(host->host_lock);
1241 return IRQ_HANDLED;
1242 }
1243
1244 irqstatus = (*ha->func.intr) (ha);
1245
1246 spin_unlock(host->host_lock);
1247
1248
1249 ips_next(ha, IPS_INTR_ON);
1250 return IRQ_RETVAL(irqstatus);
1251 }
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264 int
1265 ips_intr_copperhead(ips_ha_t * ha)
1266 {
1267 ips_stat_t *sp;
1268 ips_scb_t *scb;
1269 IPS_STATUS cstatus;
1270 int intrstatus;
1271
1272 METHOD_TRACE("ips_intr", 2);
1273
1274 if (!ha)
1275 return 0;
1276
1277 if (!ha->active)
1278 return 0;
1279
1280 intrstatus = (*ha->func.isintr) (ha);
1281
1282 if (!intrstatus) {
1283
1284
1285
1286
1287 return 0;
1288 }
1289
1290 while (TRUE) {
1291 sp = &ha->sp;
1292
1293 intrstatus = (*ha->func.isintr) (ha);
1294
1295 if (!intrstatus)
1296 break;
1297 else
1298 cstatus.value = (*ha->func.statupd) (ha);
1299
1300 if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
1301
1302 continue;
1303 }
1304
1305 ips_chkstatus(ha, &cstatus);
1306 scb = (ips_scb_t *) sp->scb_addr;
1307
1308
1309
1310
1311
1312 (*scb->callback) (ha, scb);
1313 }
1314 return 1;
1315 }
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328 int
1329 ips_intr_morpheus(ips_ha_t * ha)
1330 {
1331 ips_stat_t *sp;
1332 ips_scb_t *scb;
1333 IPS_STATUS cstatus;
1334 int intrstatus;
1335
1336 METHOD_TRACE("ips_intr_morpheus", 2);
1337
1338 if (!ha)
1339 return 0;
1340
1341 if (!ha->active)
1342 return 0;
1343
1344 intrstatus = (*ha->func.isintr) (ha);
1345
1346 if (!intrstatus) {
1347
1348
1349
1350
1351 return 0;
1352 }
1353
1354 while (TRUE) {
1355 sp = &ha->sp;
1356
1357 intrstatus = (*ha->func.isintr) (ha);
1358
1359 if (!intrstatus)
1360 break;
1361 else
1362 cstatus.value = (*ha->func.statupd) (ha);
1363
1364 if (cstatus.value == 0xffffffff)
1365
1366 break;
1367
1368 if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
1369 IPS_PRINTK(KERN_WARNING, ha->pcidev,
1370 "Spurious interrupt; no ccb.\n");
1371
1372 continue;
1373 }
1374
1375 ips_chkstatus(ha, &cstatus);
1376 scb = (ips_scb_t *) sp->scb_addr;
1377
1378
1379
1380
1381
1382 (*scb->callback) (ha, scb);
1383 }
1384 return 1;
1385 }
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396 static const char *
1397 ips_info(struct Scsi_Host *SH)
1398 {
1399 static char buffer[256];
1400 char *bp;
1401 ips_ha_t *ha;
1402
1403 METHOD_TRACE("ips_info", 1);
1404
1405 ha = IPS_HA(SH);
1406
1407 if (!ha)
1408 return (NULL);
1409
1410 bp = &buffer[0];
1411 memset(bp, 0, sizeof (buffer));
1412
1413 sprintf(bp, "%s%s%s Build %d", "IBM PCI ServeRAID ",
1414 IPS_VERSION_HIGH, IPS_VERSION_LOW, IPS_BUILD_IDENT);
1415
1416 if (ha->ad_type > 0 && ha->ad_type <= MAX_ADAPTER_NAME) {
1417 strcat(bp, " <");
1418 strcat(bp, ips_adapter_name[ha->ad_type - 1]);
1419 strcat(bp, ">");
1420 }
1421
1422 return (bp);
1423 }
1424
1425 static int
1426 ips_write_info(struct Scsi_Host *host, char *buffer, int length)
1427 {
1428 int i;
1429 ips_ha_t *ha = NULL;
1430
1431
1432 for (i = 0; i < ips_next_controller; i++) {
1433 if (ips_sh[i]) {
1434 if (ips_sh[i] == host) {
1435 ha = (ips_ha_t *) ips_sh[i]->hostdata;
1436 break;
1437 }
1438 }
1439 }
1440
1441 if (!ha)
1442 return (-EINVAL);
1443
1444 return 0;
1445 }
1446
1447 static int
1448 ips_show_info(struct seq_file *m, struct Scsi_Host *host)
1449 {
1450 int i;
1451 ips_ha_t *ha = NULL;
1452
1453
1454 for (i = 0; i < ips_next_controller; i++) {
1455 if (ips_sh[i]) {
1456 if (ips_sh[i] == host) {
1457 ha = (ips_ha_t *) ips_sh[i]->hostdata;
1458 break;
1459 }
1460 }
1461 }
1462
1463 if (!ha)
1464 return (-EINVAL);
1465
1466 return ips_host_info(ha, m);
1467 }
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
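/*
 * ips_is_passthru - a request addressed to the adapter itself with the
 * IPS_IOCTL_COMMAND opcode is a passthru if its first scatter-gather
 * buffer starts with the "COPP" signature.
 */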
1482 static int ips_is_passthru(struct scsi_cmnd *SC)
1483 {
1484 unsigned long flags;
1485
1486 METHOD_TRACE("ips_is_passthru", 1);
1487
1488 if (!SC)
1489 return (0);
1490
1491 if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
1492 (SC->device->channel == 0) &&
1493 (SC->device->id == IPS_ADAPTER_ID) &&
1494 (SC->device->lun == 0) && scsi_sglist(SC)) {
1495 struct scatterlist *sg = scsi_sglist(SC);
1496 char *buffer;
1497
1498
1499
1500 local_irq_save(flags);
1501 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
1502 if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
1503 buffer[2] == 'P' && buffer[3] == 'P') {
1504 kunmap_atomic(buffer - sg->offset);
1505 local_irq_restore(flags);
1506 return 1;
1507 }
1508 kunmap_atomic(buffer - sg->offset);
1509 local_irq_restore(flags);
1510 }
1511 return 0;
1512 }
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522 static int
1523 ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
1524 {
1525 void *bigger_buf;
1526 dma_addr_t dma_busaddr;
1527
1528 if (ha->ioctl_data && length <= ha->ioctl_len)
1529 return 0;
1530
1531 bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr,
1532 GFP_KERNEL);
1533 if (bigger_buf) {
1534
1535 dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
1536 ha->ioctl_data, ha->ioctl_busaddr);
1537
1538 ha->ioctl_data = (char *) bigger_buf;
1539 ha->ioctl_len = length;
1540 ha->ioctl_busaddr = dma_busaddr;
1541 } else {
1542 return -1;
1543 }
1544 return 0;
1545 }
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
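/*
 * ips_make_passthru - copy the passthru packet out of the scatter-gather
 * list into the coherent ioctl buffer and dispatch it: controller-count
 * query, user command, or Copperhead BIOS/firmware flash.
 */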
1556 static int
1557 ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
1558 {
1559 ips_passthru_t *pt;
1560 int length = 0;
1561 int i, ret;
1562 struct scatterlist *sg = scsi_sglist(SC);
1563
1564 METHOD_TRACE("ips_make_passthru", 1);
1565
1566 scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
1567 length += sg->length;
1568
1569 if (length < sizeof (ips_passthru_t)) {
1570
1571 DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
1572 ips_name, ha->host_num);
1573 return (IPS_FAILURE);
1574 }
1575 if (ips_alloc_passthru_buffer(ha, length)) {
1576
1577
1578 if (ha->ioctl_data) {
1579 pt = (ips_passthru_t *) ha->ioctl_data;
1580 ips_scmd_buf_read(SC, pt, sizeof (ips_passthru_t));
1581 pt->BasicStatus = 0x0B;
1582 pt->ExtendedStatus = 0x00;
1583 ips_scmd_buf_write(SC, pt, sizeof (ips_passthru_t));
1584 }
1585 return IPS_FAILURE;
1586 }
1587 ha->ioctl_datasize = length;
1588
1589 ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize);
1590 pt = (ips_passthru_t *) ha->ioctl_data;
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602 switch (pt->CoppCmd) {
1603 case IPS_NUMCTRLS:
1604 memcpy(ha->ioctl_data + sizeof (ips_passthru_t),
1605 &ips_num_controllers, sizeof (int));
1606 ips_scmd_buf_write(SC, ha->ioctl_data,
1607 sizeof (ips_passthru_t) + sizeof (int));
1608 SC->result = DID_OK << 16;
1609
1610 return (IPS_SUCCESS_IMM);
1611
1612 case IPS_COPPUSRCMD:
1613 case IPS_COPPIOCCMD:
1614 if (SC->cmnd[0] == IPS_IOCTL_COMMAND) {
1615 if (length < (sizeof (ips_passthru_t) + pt->CmdBSize)) {
1616
1617 DEBUG_VAR(1,
1618 "(%s%d) Passthru structure wrong size",
1619 ips_name, ha->host_num);
1620
1621 return (IPS_FAILURE);
1622 }
1623
1624 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
1625 pt->CoppCP.cmd.flashfw.op_code ==
1626 IPS_CMD_RW_BIOSFW) {
1627 ret = ips_flash_copperhead(ha, pt, scb);
1628 ips_scmd_buf_write(SC, ha->ioctl_data,
1629 sizeof (ips_passthru_t));
1630 return ret;
1631 }
1632 if (ips_usrcmd(ha, pt, scb))
1633 return (IPS_SUCCESS);
1634 else
1635 return (IPS_FAILURE);
1636 }
1637
1638 break;
1639
1640 }
1641
1642 return (IPS_FAILURE);
1643 }
1644
1645
1646
1647
1648
1649
1650 static int
1651 ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1652 {
1653 int datasize;
1654
1655
1656
1657 if (IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) {
1658 if (ips_usrcmd(ha, pt, scb))
1659 return IPS_SUCCESS;
1660 else
1661 return IPS_FAILURE;
1662 }
1663 pt->BasicStatus = 0x0B;
1664 pt->ExtendedStatus = 0;
1665 scb->scsi_cmd->result = DID_OK << 16;
1666
1667
1668 if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
1669 pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
1670 pt->BasicStatus = 0;
1671 return ips_flash_bios(ha, pt, scb);
1672 } else if (pt->CoppCP.cmd.flashfw.packet_num == 0) {
1673 if (ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)){
1674 ha->flash_data = ips_FlashData;
1675 ha->flash_busaddr = ips_flashbusaddr;
1676 ha->flash_len = PAGE_SIZE << 7;
1677 ha->flash_datasize = 0;
1678 } else if (!ha->flash_data) {
1679 datasize = pt->CoppCP.cmd.flashfw.total_packets *
1680 pt->CoppCP.cmd.flashfw.count;
1681 ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev,
1682 datasize, &ha->flash_busaddr, GFP_KERNEL);
1683 if (!ha->flash_data){
1684 printk(KERN_WARNING "Unable to allocate a flash buffer\n");
1685 return IPS_FAILURE;
1686 }
1687 ha->flash_datasize = 0;
1688 ha->flash_len = datasize;
1689 } else
1690 return IPS_FAILURE;
1691 } else {
1692 if (pt->CoppCP.cmd.flashfw.count + ha->flash_datasize >
1693 ha->flash_len) {
1694 ips_free_flash_copperhead(ha);
1695 IPS_PRINTK(KERN_WARNING, ha->pcidev,
1696 "failed size sanity check\n");
1697 return IPS_FAILURE;
1698 }
1699 }
1700 if (!ha->flash_data)
1701 return IPS_FAILURE;
1702 pt->BasicStatus = 0;
1703 memcpy(&ha->flash_data[ha->flash_datasize], pt + 1,
1704 pt->CoppCP.cmd.flashfw.count);
1705 ha->flash_datasize += pt->CoppCP.cmd.flashfw.count;
1706 if (pt->CoppCP.cmd.flashfw.packet_num ==
1707 pt->CoppCP.cmd.flashfw.total_packets - 1) {
1708 if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE)
1709 return ips_flash_bios(ha, pt, scb);
1710 else if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE)
1711 return ips_flash_firmware(ha, pt, scb);
1712 }
1713 return IPS_SUCCESS_IMM;
1714 }
1715
1716
1717
1718
1719
1720
1721 static int
1722 ips_flash_bios(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1723 {
1724
1725 if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
1726 pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS) {
1727 if ((!ha->func.programbios) || (!ha->func.erasebios) ||
1728 (!ha->func.verifybios))
1729 goto error;
1730 if ((*ha->func.erasebios) (ha)) {
1731 DEBUG_VAR(1,
1732 "(%s%d) flash bios failed - unable to erase flash",
1733 ips_name, ha->host_num);
1734 goto error;
1735 } else
1736 if ((*ha->func.programbios) (ha,
1737 ha->flash_data +
1738 IPS_BIOS_HEADER,
1739 ha->flash_datasize -
1740 IPS_BIOS_HEADER, 0)) {
1741 DEBUG_VAR(1,
1742 "(%s%d) flash bios failed - unable to flash",
1743 ips_name, ha->host_num);
1744 goto error;
1745 } else
1746 if ((*ha->func.verifybios) (ha,
1747 ha->flash_data +
1748 IPS_BIOS_HEADER,
1749 ha->flash_datasize -
1750 IPS_BIOS_HEADER, 0)) {
1751 DEBUG_VAR(1,
1752 "(%s%d) flash bios failed - unable to verify flash",
1753 ips_name, ha->host_num);
1754 goto error;
1755 }
1756 ips_free_flash_copperhead(ha);
1757 return IPS_SUCCESS_IMM;
1758 } else if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
1759 pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
1760 if (!ha->func.erasebios)
1761 goto error;
1762 if ((*ha->func.erasebios) (ha)) {
1763 DEBUG_VAR(1,
1764 "(%s%d) flash bios failed - unable to erase flash",
1765 ips_name, ha->host_num);
1766 goto error;
1767 }
1768 return IPS_SUCCESS_IMM;
1769 }
1770 error:
1771 pt->BasicStatus = 0x0B;
1772 pt->ExtendedStatus = 0x00;
1773 ips_free_flash_copperhead(ha);
1774 return IPS_FAILURE;
1775 }
1776
1777
1778
1779
1780
1781
1782
1783
1784
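/*
 * ips_fill_scb_sg_single - add one DMA segment to the SCB scatter-gather
 * list; if the transfer would exceed ha->max_xfer the segment is trimmed
 * and the breakup point recorded so the command can be continued later.
 */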
1785 static int
1786 ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr,
1787 ips_scb_t * scb, int indx, unsigned int e_len)
1788 {
1789
1790 int ret_val = 0;
1791
1792 if ((scb->data_len + e_len) > ha->max_xfer) {
1793 e_len = ha->max_xfer - scb->data_len;
1794 scb->breakup = indx;
1795 ++scb->sg_break;
1796 ret_val = -1;
1797 } else {
1798 scb->breakup = 0;
1799 scb->sg_break = 0;
1800 }
1801 if (IPS_USE_ENH_SGLIST(ha)) {
1802 scb->sg_list.enh_list[indx].address_lo =
1803 cpu_to_le32(lower_32_bits(busaddr));
1804 scb->sg_list.enh_list[indx].address_hi =
1805 cpu_to_le32(upper_32_bits(busaddr));
1806 scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len);
1807 } else {
1808 scb->sg_list.std_list[indx].address =
1809 cpu_to_le32(lower_32_bits(busaddr));
1810 scb->sg_list.std_list[indx].length = cpu_to_le32(e_len);
1811 }
1812
1813 ++scb->sg_len;
1814 scb->data_len += e_len;
1815 return ret_val;
1816 }
1817
1818
1819
1820
1821
1822
1823 static int
1824 ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1825 {
1826 IPS_SG_LIST sg_list;
1827 uint32_t cmd_busaddr;
1828
1829 if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE &&
1830 pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW) {
1831 memset(&pt->CoppCP.cmd, 0, sizeof (IPS_HOST_COMMAND));
1832 pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD;
1833 pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize);
1834 } else {
1835 pt->BasicStatus = 0x0B;
1836 pt->ExtendedStatus = 0x00;
1837 ips_free_flash_copperhead(ha);
1838 return IPS_FAILURE;
1839 }
1840
1841 sg_list.list = scb->sg_list.list;
1842 cmd_busaddr = scb->scb_busaddr;
1843
1844 memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
1845
1846 scb->sg_list.list = sg_list.list;
1847 scb->scb_busaddr = cmd_busaddr;
1848 scb->bus = scb->scsi_cmd->device->channel;
1849 scb->target_id = scb->scsi_cmd->device->id;
1850 scb->lun = scb->scsi_cmd->device->lun;
1851 scb->sg_len = 0;
1852 scb->data_len = 0;
1853 scb->flags = 0;
1854 scb->op_code = 0;
1855 scb->callback = ipsintr_done;
1856 scb->timeout = ips_cmd_timeout;
1857
1858 scb->data_len = ha->flash_datasize;
1859 scb->data_busaddr =
1860 dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len,
1861 IPS_DMA_DIR(scb));
1862 scb->flags |= IPS_SCB_MAP_SINGLE;
1863 scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
1864 scb->cmd.flashfw.buffer_addr = cpu_to_le32(scb->data_busaddr);
1865 if (pt->TimeOut)
1866 scb->timeout = pt->TimeOut;
1867 scb->scsi_cmd->result = DID_OK << 16;
1868 return IPS_SUCCESS;
1869 }
1870
1871
1872
1873
1874
1875
1876 static void
1877 ips_free_flash_copperhead(ips_ha_t * ha)
1878 {
1879 if (ha->flash_data == ips_FlashData)
1880 test_and_clear_bit(0, &ips_FlashDataInUse);
1881 else if (ha->flash_data)
1882 dma_free_coherent(&ha->pcidev->dev, ha->flash_len,
1883 ha->flash_data, ha->flash_busaddr);
1884 ha->flash_data = NULL;
1885 }
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896 static int
1897 ips_usrcmd(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1898 {
1899 IPS_SG_LIST sg_list;
1900 uint32_t cmd_busaddr;
1901
1902 METHOD_TRACE("ips_usrcmd", 1);
1903
1904 if ((!scb) || (!pt) || (!ha))
1905 return (0);
1906
1907
1908 sg_list.list = scb->sg_list.list;
1909 cmd_busaddr = scb->scb_busaddr;
1910
1911 memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
1912 memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof (IPS_DCDB_TABLE));
1913
1914
1915 scb->sg_list.list = sg_list.list;
1916 scb->scb_busaddr = cmd_busaddr;
1917 scb->bus = scb->scsi_cmd->device->channel;
1918 scb->target_id = scb->scsi_cmd->device->id;
1919 scb->lun = scb->scsi_cmd->device->lun;
1920 scb->sg_len = 0;
1921 scb->data_len = 0;
1922 scb->flags = 0;
1923 scb->op_code = 0;
1924 scb->callback = ipsintr_done;
1925 scb->timeout = ips_cmd_timeout;
1926 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
1927
1928
1929 if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) ||
1930 (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) ||
1931 (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG))
1932 return (0);
1933
1934 if (pt->CmdBSize) {
1935 scb->data_len = pt->CmdBSize;
1936 scb->data_busaddr = ha->ioctl_busaddr + sizeof (ips_passthru_t);
1937 } else {
1938 scb->data_busaddr = 0L;
1939 }
1940
1941 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
1942 scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
1943 (unsigned long) &scb->
1944 dcdb -
1945 (unsigned long) scb);
1946
1947 if (pt->CmdBSize) {
1948 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
1949 scb->dcdb.buffer_pointer =
1950 cpu_to_le32(scb->data_busaddr);
1951 else
1952 scb->cmd.basic_io.sg_addr =
1953 cpu_to_le32(scb->data_busaddr);
1954 }
1955
1956
1957 if (pt->TimeOut) {
1958 scb->timeout = pt->TimeOut;
1959
1960 if (pt->TimeOut <= 10)
1961 scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;
1962 else if (pt->TimeOut <= 60)
1963 scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;
1964 else
1965 scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;
1966 }
1967
1968
1969 scb->scsi_cmd->result = DID_OK << 16;
1970
1971
1972 return (1);
1973 }
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984 static void
1985 ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
1986 {
1987 ips_passthru_t *pt;
1988
1989 METHOD_TRACE("ips_cleanup_passthru", 1);
1990
1991 if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
1992 DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
1993 ips_name, ha->host_num);
1994
1995 return;
1996 }
1997 pt = (ips_passthru_t *) ha->ioctl_data;
1998
1999
2000 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
2001 memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof (IPS_DCDB_TABLE));
2002
2003 pt->BasicStatus = scb->basic_status;
2004 pt->ExtendedStatus = scb->extended_status;
2005 pt->AdapterType = ha->ad_type;
2006
2007 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
2008 (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD ||
2009 scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW))
2010 ips_free_flash_copperhead(ha);
2011
2012 ips_scmd_buf_write(scb->scsi_cmd, ha->ioctl_data, ha->ioctl_datasize);
2013 }
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024 static int
2025 ips_host_info(ips_ha_t *ha, struct seq_file *m)
2026 {
2027 METHOD_TRACE("ips_host_info", 1);
2028
2029 seq_puts(m, "\nIBM ServeRAID General Information:\n\n");
2030
2031 if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) &&
2032 (le16_to_cpu(ha->nvram->adapter_type) != 0))
2033 seq_printf(m, "\tController Type : %s\n",
2034 ips_adapter_name[ha->ad_type - 1]);
2035 else
2036 seq_puts(m, "\tController Type : Unknown\n");
2037
2038 if (ha->io_addr)
2039 seq_printf(m,
2040 "\tIO region : 0x%x (%d bytes)\n",
2041 ha->io_addr, ha->io_len);
2042
2043 if (ha->mem_addr) {
2044 seq_printf(m,
2045 "\tMemory region : 0x%x (%d bytes)\n",
2046 ha->mem_addr, ha->mem_len);
2047 seq_printf(m,
2048 "\tShared memory address : 0x%lx\n",
2049 (unsigned long)ha->mem_ptr);
2050 }
2051
2052 seq_printf(m, "\tIRQ number : %d\n", ha->pcidev->irq);
2053
2054
2055
2056
2057 if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) {
2058 if (ha->nvram->bios_low[3] == 0) {
2059 seq_printf(m,
2060 "\tBIOS Version : %c%c%c%c%c%c%c\n",
2061 ha->nvram->bios_high[0], ha->nvram->bios_high[1],
2062 ha->nvram->bios_high[2], ha->nvram->bios_high[3],
2063 ha->nvram->bios_low[0], ha->nvram->bios_low[1],
2064 ha->nvram->bios_low[2]);
2065
2066 } else {
2067 seq_printf(m,
2068 "\tBIOS Version : %c%c%c%c%c%c%c%c\n",
2069 ha->nvram->bios_high[0], ha->nvram->bios_high[1],
2070 ha->nvram->bios_high[2], ha->nvram->bios_high[3],
2071 ha->nvram->bios_low[0], ha->nvram->bios_low[1],
2072 ha->nvram->bios_low[2], ha->nvram->bios_low[3]);
2073 }
2074
2075 }
2076
2077 if (ha->enq->CodeBlkVersion[7] == 0) {
2078 seq_printf(m,
2079 "\tFirmware Version : %c%c%c%c%c%c%c\n",
2080 ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
2081 ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
2082 ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
2083 ha->enq->CodeBlkVersion[6]);
2084 } else {
2085 seq_printf(m,
2086 "\tFirmware Version : %c%c%c%c%c%c%c%c\n",
2087 ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
2088 ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
2089 ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
2090 ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]);
2091 }
2092
2093 if (ha->enq->BootBlkVersion[7] == 0) {
2094 seq_printf(m,
2095 "\tBoot Block Version : %c%c%c%c%c%c%c\n",
2096 ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
2097 ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
2098 ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
2099 ha->enq->BootBlkVersion[6]);
2100 } else {
2101 seq_printf(m,
2102 "\tBoot Block Version : %c%c%c%c%c%c%c%c\n",
2103 ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
2104 ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
2105 ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
2106 ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]);
2107 }
2108
2109 seq_printf(m, "\tDriver Version : %s%s\n",
2110 IPS_VERSION_HIGH, IPS_VERSION_LOW);
2111
2112 seq_printf(m, "\tDriver Build : %d\n",
2113 IPS_BUILD_IDENT);
2114
2115 seq_printf(m, "\tMax Physical Devices : %d\n",
2116 ha->enq->ucMaxPhysicalDevices);
2117 seq_printf(m, "\tMax Active Commands : %d\n",
2118 ha->max_cmds);
2119 seq_printf(m, "\tCurrent Queued Commands : %d\n",
2120 ha->scb_waitlist.count);
2121 seq_printf(m, "\tCurrent Active Commands : %d\n",
2122 ha->scb_activelist.count - ha->num_ioctl);
2123 seq_printf(m, "\tCurrent Queued PT Commands : %d\n",
2124 ha->copp_waitlist.count);
2125 seq_printf(m, "\tCurrent Active PT Commands : %d\n",
2126 ha->num_ioctl);
2127
2128 seq_putc(m, '\n');
2129
2130 return 0;
2131 }
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142 static void
2143 ips_identify_controller(ips_ha_t * ha)
2144 {
2145 METHOD_TRACE("ips_identify_controller", 1);
2146
2147 switch (ha->pcidev->device) {
2148 case IPS_DEVICEID_COPPERHEAD:
2149 if (ha->pcidev->revision <= IPS_REVID_SERVERAID) {
2150 ha->ad_type = IPS_ADTYPE_SERVERAID;
2151 } else if (ha->pcidev->revision == IPS_REVID_SERVERAID2) {
2152 ha->ad_type = IPS_ADTYPE_SERVERAID2;
2153 } else if (ha->pcidev->revision == IPS_REVID_NAVAJO) {
2154 ha->ad_type = IPS_ADTYPE_NAVAJO;
2155 } else if ((ha->pcidev->revision == IPS_REVID_SERVERAID2)
2156 && (ha->slot_num == 0)) {
2157 ha->ad_type = IPS_ADTYPE_KIOWA;
2158 } else if ((ha->pcidev->revision >= IPS_REVID_CLARINETP1) &&
2159 (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) {
2160 if (ha->enq->ucMaxPhysicalDevices == 15)
2161 ha->ad_type = IPS_ADTYPE_SERVERAID3L;
2162 else
2163 ha->ad_type = IPS_ADTYPE_SERVERAID3;
2164 } else if ((ha->pcidev->revision >= IPS_REVID_TROMBONE32) &&
2165 (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) {
2166 ha->ad_type = IPS_ADTYPE_SERVERAID4H;
2167 }
2168 break;
2169
2170 case IPS_DEVICEID_MORPHEUS:
2171 switch (ha->pcidev->subsystem_device) {
2172 case IPS_SUBDEVICEID_4L:
2173 ha->ad_type = IPS_ADTYPE_SERVERAID4L;
2174 break;
2175
2176 case IPS_SUBDEVICEID_4M:
2177 ha->ad_type = IPS_ADTYPE_SERVERAID4M;
2178 break;
2179
2180 case IPS_SUBDEVICEID_4MX:
2181 ha->ad_type = IPS_ADTYPE_SERVERAID4MX;
2182 break;
2183
2184 case IPS_SUBDEVICEID_4LX:
2185 ha->ad_type = IPS_ADTYPE_SERVERAID4LX;
2186 break;
2187
2188 case IPS_SUBDEVICEID_5I2:
2189 ha->ad_type = IPS_ADTYPE_SERVERAID5I2;
2190 break;
2191
2192 case IPS_SUBDEVICEID_5I1:
2193 ha->ad_type = IPS_ADTYPE_SERVERAID5I1;
2194 break;
2195 }
2196
2197 break;
2198
2199 case IPS_DEVICEID_MARCO:
2200 switch (ha->pcidev->subsystem_device) {
2201 case IPS_SUBDEVICEID_6M:
2202 ha->ad_type = IPS_ADTYPE_SERVERAID6M;
2203 break;
2204 case IPS_SUBDEVICEID_6I:
2205 ha->ad_type = IPS_ADTYPE_SERVERAID6I;
2206 break;
2207 case IPS_SUBDEVICEID_7k:
2208 ha->ad_type = IPS_ADTYPE_SERVERAID7k;
2209 break;
2210 case IPS_SUBDEVICEID_7M:
2211 ha->ad_type = IPS_ADTYPE_SERVERAID7M;
2212 break;
2213 }
2214 break;
2215 }
2216 }
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227 static void
2228 ips_get_bios_version(ips_ha_t * ha, int intr)
2229 {
2230 ips_scb_t *scb;
2231 int ret;
2232 uint8_t major;
2233 uint8_t minor;
2234 uint8_t subminor;
2235 uint8_t *buffer;
2236
2237 METHOD_TRACE("ips_get_bios_version", 1);
2238
2239 major = 0;
2240 minor = 0;
2241
2242 strncpy(ha->bios_version, " ?", 8);
2243
2244 if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) {
2245 if (IPS_USE_MEMIO(ha)) {
2246
2247
2248
2249 writel(0, ha->mem_ptr + IPS_REG_FLAP);
2250 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2251 udelay(25);
2252
2253 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
2254 return;
2255
2256 writel(1, ha->mem_ptr + IPS_REG_FLAP);
2257 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2258 udelay(25);
2259
2260 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
2261 return;
2262
2263
2264 writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP);
2265 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2266 udelay(25);
2267
2268 major = readb(ha->mem_ptr + IPS_REG_FLDP);
2269
2270
2271 writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP);
2272 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2273 udelay(25);
2274 minor = readb(ha->mem_ptr + IPS_REG_FLDP);
2275
2276
2277 writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP);
2278 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2279 udelay(25);
2280 subminor = readb(ha->mem_ptr + IPS_REG_FLDP);
2281
2282 } else {
2283
2284
2285
2286 outl(0, ha->io_addr + IPS_REG_FLAP);
2287 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2288 udelay(25);
2289
2290 if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
2291 return;
2292
2293 outl(1, ha->io_addr + IPS_REG_FLAP);
2294 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2295 udelay(25);
2296
2297 if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
2298 return;
2299
2300
2301 outl(0x1FF, ha->io_addr + IPS_REG_FLAP);
2302 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2303 udelay(25);
2304
2305 major = inb(ha->io_addr + IPS_REG_FLDP);
2306
2307
2308 outl(0x1FE, ha->io_addr + IPS_REG_FLAP);
2309 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2310 udelay(25);
2311
2312 minor = inb(ha->io_addr + IPS_REG_FLDP);
2313
2314
2315 outl(0x1FD, ha->io_addr + IPS_REG_FLAP);
2316 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
2317 udelay(25);
2318
2319 subminor = inb(ha->io_addr + IPS_REG_FLDP);
2320
2321 }
2322 } else {
2323
2324
2325 buffer = ha->ioctl_data;
2326
2327 memset(buffer, 0, 0x1000);
2328
2329 scb = &ha->scbs[ha->max_cmds - 1];
2330
2331 ips_init_scb(ha, scb);
2332
2333 scb->timeout = ips_cmd_timeout;
2334 scb->cdb[0] = IPS_CMD_RW_BIOSFW;
2335
2336 scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW;
2337 scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
2338 scb->cmd.flashfw.type = 1;
2339 scb->cmd.flashfw.direction = 0;
2340 scb->cmd.flashfw.count = cpu_to_le32(0x800);
2341 scb->cmd.flashfw.total_packets = 1;
2342 scb->cmd.flashfw.packet_num = 0;
2343 scb->data_len = 0x1000;
2344 scb->cmd.flashfw.buffer_addr = ha->ioctl_busaddr;
2345
2346
		ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr);
		if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) ||
		    ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
2352
2353
2354 return;
2355 }
2356
2357 if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) {
2358 major = buffer[0x1ff + 0xC0];
2359 minor = buffer[0x1fe + 0xC0];
2360 subminor = buffer[0x1fd + 0xC0];
2361 } else {
2362 return;
2363 }
2364 }
2365
2366 ha->bios_version[0] = hex_asc_upper_hi(major);
2367 ha->bios_version[1] = '.';
2368 ha->bios_version[2] = hex_asc_upper_lo(major);
2369 ha->bios_version[3] = hex_asc_upper_lo(subminor);
2370 ha->bios_version[4] = '.';
2371 ha->bios_version[5] = hex_asc_upper_hi(minor);
2372 ha->bios_version[6] = hex_asc_upper_lo(minor);
2373 ha->bios_version[7] = 0;
2374 }
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
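/*
 * ips_hainit - bring a detected adapter to an operational state.
 * Initializes the status queue, enables interrupts, records the reset
 * time for FFDC, then reads the configuration, enquiry status and
 * subsystem parameters from the controller and writes the driver info
 * back.  From that data it derives the reported SCSI topology
 * (ntargets/nlun/nbus), the maximum transfer size (stripe size 4..7 maps
 * to 64K/128K/256K/512K) and the maximum number of concurrent commands.
 * Returns 1 on success, 0 on any failure.
 */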
2387 static int
2388 ips_hainit(ips_ha_t * ha)
2389 {
2390 int i;
2391
2392 METHOD_TRACE("ips_hainit", 1);
2393
2394 if (!ha)
2395 return (0);
2396
2397 if (ha->func.statinit)
2398 (*ha->func.statinit) (ha);
2399
2400 if (ha->func.enableint)
2401 (*ha->func.enableint) (ha);
2402
2403
2404 ha->reset_count = 1;
2405 ha->last_ffdc = ktime_get_real_seconds();
2406 ips_ffdc_reset(ha, IPS_INTR_IORL);
2407
2408 if (!ips_read_config(ha, IPS_INTR_IORL)) {
2409 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2410 "unable to read config from controller.\n");
2411
2412 return (0);
2413 }
2414
2415 if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) {
2416 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2417 "unable to read controller status.\n");
2418
2419 return (0);
2420 }
2421
2422
2423 ips_identify_controller(ha);
2424
2425 if (!ips_read_subsystem_parameters(ha, IPS_INTR_IORL)) {
2426 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2427 "unable to read subsystem parameters.\n");
2428
2429 return (0);
2430 }
2431
2432
2433 if (!ips_write_driver_status(ha, IPS_INTR_IORL)) {
2434 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2435 "unable to write driver info to controller.\n");
2436
2437 return (0);
2438 }
2439
2440
2441 if ((ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1))
2442 ips_clear_adapter(ha, IPS_INTR_IORL);
2443
2444
2445 ha->ntargets = IPS_MAX_TARGETS + 1;
2446 ha->nlun = 1;
2447 ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1;
2448
2449 switch (ha->conf->logical_drive[0].ucStripeSize) {
2450 case 4:
2451 ha->max_xfer = 0x10000;
2452 break;
2453
2454 case 5:
2455 ha->max_xfer = 0x20000;
2456 break;
2457
2458 case 6:
2459 ha->max_xfer = 0x40000;
2460 break;
2461
2462 case 7:
2463 default:
2464 ha->max_xfer = 0x80000;
2465 break;
2466 }
2467
2468
2469 if (le32_to_cpu(ha->subsys->param[4]) & 0x1) {
2470
2471 ha->max_cmds = ha->enq->ucConcurrentCmdCount;
2472 } else {
2473
2474 switch (ha->conf->logical_drive[0].ucStripeSize) {
2475 case 4:
2476 ha->max_cmds = 32;
2477 break;
2478
2479 case 5:
2480 ha->max_cmds = 16;
2481 break;
2482
2483 case 6:
2484 ha->max_cmds = 8;
2485 break;
2486
2487 case 7:
2488 default:
2489 ha->max_cmds = 4;
2490 break;
2491 }
2492 }
2493
2494
2495 if ((ha->ad_type == IPS_ADTYPE_SERVERAID3L) ||
2496 (ha->ad_type == IPS_ADTYPE_SERVERAID4L) ||
2497 (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) {
2498 if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds))
2499 ha->max_cmds = MaxLiteCmds;
2500 }
2501
2502
2503 ha->ha_id[0] = IPS_ADAPTER_ID;
2504 for (i = 1; i < ha->nbus; i++) {
2505 ha->ha_id[i] = ha->conf->init_id[i - 1] & 0x1f;
2506 ha->dcdb_active[i - 1] = 0;
2507 }
2508
2509 return (1);
2510 }
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
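/*
 * ips_next - move commands from the wait queues to the adapter.
 * Passthru (ioctl) requests are serviced first, up to IPS_MAX_IOCTL at a
 * time; normal SCSI commands are then DMA-mapped, their scatter-gather
 * lists built, and issued.  Commands for a physical target that already
 * has a DCDB outstanding are skipped until it completes.  When called
 * with intr == IPS_INTR_ON the host lock is taken here and dropped around
 * the passthru setup and command construction.  The routine also
 * refreshes the controller's FFDC time stamp roughly every eight hours
 * while the adapter is idle.
 */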
2521 static void
2522 ips_next(ips_ha_t * ha, int intr)
2523 {
2524 ips_scb_t *scb;
2525 struct scsi_cmnd *SC;
2526 struct scsi_cmnd *p;
2527 struct scsi_cmnd *q;
2528 ips_copp_wait_item_t *item;
2529 int ret;
2530 struct Scsi_Host *host;
2531 METHOD_TRACE("ips_next", 1);
2532
2533 if (!ha)
2534 return;
2535 host = ips_sh[ha->host_num];
2536
2537
2538
2539
2540 if (intr == IPS_INTR_ON)
2541 spin_lock(host->host_lock);
2542
2543 if ((ha->subsys->param[3] & 0x300000)
2544 && (ha->scb_activelist.count == 0)) {
2545 time64_t now = ktime_get_real_seconds();
2546 if (now - ha->last_ffdc > IPS_SECS_8HOURS) {
2547 ha->last_ffdc = now;
2548 ips_ffdc_time(ha);
2549 }
2550 }
2551
2552
2553
2554
2555
2556
2557
2558
2559 while ((ha->num_ioctl < IPS_MAX_IOCTL) &&
2560 (ha->copp_waitlist.head) && (scb = ips_getscb(ha))) {
2561
2562 item = ips_removeq_copp_head(&ha->copp_waitlist);
2563 ha->num_ioctl++;
2564 if (intr == IPS_INTR_ON)
2565 spin_unlock(host->host_lock);
2566 scb->scsi_cmd = item->scsi_cmd;
2567 kfree(item);
2568
2569 ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);
2570
2571 if (intr == IPS_INTR_ON)
2572 spin_lock(host->host_lock);
2573 switch (ret) {
2574 case IPS_FAILURE:
2575 if (scb->scsi_cmd) {
2576 scb->scsi_cmd->result = DID_ERROR << 16;
2577 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
2578 }
2579
2580 ips_freescb(ha, scb);
2581 break;
2582 case IPS_SUCCESS_IMM:
2583 if (scb->scsi_cmd) {
2584 scb->scsi_cmd->result = DID_OK << 16;
2585 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
2586 }
2587
2588 ips_freescb(ha, scb);
2589 break;
2590 default:
2591 break;
2592 }
2593
2594 if (ret != IPS_SUCCESS) {
2595 ha->num_ioctl--;
2596 continue;
2597 }
2598
2599 ret = ips_send_cmd(ha, scb);
2600
2601 if (ret == IPS_SUCCESS)
2602 ips_putq_scb_head(&ha->scb_activelist, scb);
2603 else
2604 ha->num_ioctl--;
2605
2606 switch (ret) {
2607 case IPS_FAILURE:
2608 if (scb->scsi_cmd) {
2609 scb->scsi_cmd->result = DID_ERROR << 16;
2610 }
2611
2612 ips_freescb(ha, scb);
2613 break;
2614 case IPS_SUCCESS_IMM:
2615 ips_freescb(ha, scb);
2616 break;
2617 default:
2618 break;
2619 }
2620
2621 }
2622
2623
2624
2625
2626
2627 p = ha->scb_waitlist.head;
2628 while ((p) && (scb = ips_getscb(ha))) {
		if ((scmd_channel(p) > 0) &&
		    (ha->dcdb_active[scmd_channel(p) - 1] &
		     (1 << scmd_id(p)))) {
2633 ips_freescb(ha, scb);
2634 p = (struct scsi_cmnd *) p->host_scribble;
2635 continue;
2636 }
2637
2638 q = p;
2639 SC = ips_removeq_wait(&ha->scb_waitlist, q);
2640
2641 if (intr == IPS_INTR_ON)
2642 spin_unlock(host->host_lock);
2643
2644 SC->result = DID_OK;
2645 SC->host_scribble = NULL;
2646
2647 scb->target_id = SC->device->id;
2648 scb->lun = SC->device->lun;
2649 scb->bus = SC->device->channel;
2650 scb->scsi_cmd = SC;
2651 scb->breakup = 0;
2652 scb->data_len = 0;
2653 scb->callback = ipsintr_done;
2654 scb->timeout = ips_cmd_timeout;
2655 memset(&scb->cmd, 0, 16);
2656
2657
2658 memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
2659
2660 scb->sg_count = scsi_dma_map(SC);
2661 BUG_ON(scb->sg_count < 0);
2662 if (scb->sg_count) {
2663 struct scatterlist *sg;
2664 int i;
2665
2666 scb->flags |= IPS_SCB_MAP_SG;
2667
2668 scsi_for_each_sg(SC, sg, scb->sg_count, i) {
2669 if (ips_fill_scb_sg_single
2670 (ha, sg_dma_address(sg), scb, i,
2671 sg_dma_len(sg)) < 0)
2672 break;
2673 }
2674 scb->dcdb.transfer_length = scb->data_len;
2675 } else {
2676 scb->data_busaddr = 0L;
2677 scb->sg_len = 0;
2678 scb->data_len = 0;
2679 scb->dcdb.transfer_length = 0;
2680 }
2681
2682 scb->dcdb.cmd_attribute =
2683 ips_command_direction[scb->scsi_cmd->cmnd[0]];
2684
2685
2686
2687 if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) &&
2688 (scb->data_len == 0))
2689 scb->dcdb.cmd_attribute = 0;
2690
2691 if (!(scb->dcdb.cmd_attribute & 0x3))
2692 scb->dcdb.transfer_length = 0;
2693
2694 if (scb->data_len >= IPS_MAX_XFER) {
2695 scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
2696 scb->dcdb.transfer_length = 0;
2697 }
2698 if (intr == IPS_INTR_ON)
2699 spin_lock(host->host_lock);
2700
2701 ret = ips_send_cmd(ha, scb);
2702
2703 switch (ret) {
2704 case IPS_SUCCESS:
2705 ips_putq_scb_head(&ha->scb_activelist, scb);
2706 break;
2707 case IPS_FAILURE:
2708 if (scb->scsi_cmd) {
2709 scb->scsi_cmd->result = DID_ERROR << 16;
2710 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
2711 }
2712
2713 if (scb->bus)
2714 ha->dcdb_active[scb->bus - 1] &=
2715 ~(1 << scb->target_id);
2716
2717 ips_freescb(ha, scb);
2718 break;
2719 case IPS_SUCCESS_IMM:
2720 if (scb->scsi_cmd)
2721 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
2722
2723 if (scb->bus)
2724 ha->dcdb_active[scb->bus - 1] &=
2725 ~(1 << scb->target_id);
2726
2727 ips_freescb(ha, scb);
2728 break;
2729 default:
2730 break;
2731 }
2732
2733 p = (struct scsi_cmnd *) p->host_scribble;
2734
2735 }
2736
2737 if (intr == IPS_INTR_ON)
2738 spin_unlock(host->host_lock);
2739 }
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
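/*
 * SCB, wait and copp queue helpers.  All three queues are plain singly
 * linked lists with head/tail/count bookkeeping: SCBs are chained through
 * q_next, waiting scsi_cmnd structures through host_scribble, and
 * passthru wait items through their next pointer.  Callers are expected
 * to hold the host lock while manipulating the queues.
 */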
2752 static void
2753 ips_putq_scb_head(ips_scb_queue_t * queue, ips_scb_t * item)
2754 {
2755 METHOD_TRACE("ips_putq_scb_head", 1);
2756
2757 if (!item)
2758 return;
2759
2760 item->q_next = queue->head;
2761 queue->head = item;
2762
2763 if (!queue->tail)
2764 queue->tail = item;
2765
2766 queue->count++;
2767 }
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780 static ips_scb_t *
2781 ips_removeq_scb_head(ips_scb_queue_t * queue)
2782 {
2783 ips_scb_t *item;
2784
2785 METHOD_TRACE("ips_removeq_scb_head", 1);
2786
2787 item = queue->head;
2788
2789 if (!item) {
2790 return (NULL);
2791 }
2792
2793 queue->head = item->q_next;
2794 item->q_next = NULL;
2795
2796 if (queue->tail == item)
2797 queue->tail = NULL;
2798
2799 queue->count--;
2800
2801 return (item);
2802 }
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815 static ips_scb_t *
2816 ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
2817 {
2818 ips_scb_t *p;
2819
2820 METHOD_TRACE("ips_removeq_scb", 1);
2821
2822 if (!item)
2823 return (NULL);
2824
2825 if (item == queue->head) {
2826 return (ips_removeq_scb_head(queue));
2827 }
2828
2829 p = queue->head;
2830
2831 while ((p) && (item != p->q_next))
2832 p = p->q_next;
2833
2834 if (p) {
2835
2836 p->q_next = item->q_next;
2837
2838 if (!item->q_next)
2839 queue->tail = p;
2840
2841 item->q_next = NULL;
2842 queue->count--;
2843
2844 return (item);
2845 }
2846
2847 return (NULL);
2848 }
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861 static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item)
2862 {
2863 METHOD_TRACE("ips_putq_wait_tail", 1);
2864
2865 if (!item)
2866 return;
2867
2868 item->host_scribble = NULL;
2869
2870 if (queue->tail)
2871 queue->tail->host_scribble = (char *) item;
2872
2873 queue->tail = item;
2874
2875 if (!queue->head)
2876 queue->head = item;
2877
2878 queue->count++;
2879 }
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892 static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue)
2893 {
2894 struct scsi_cmnd *item;
2895
2896 METHOD_TRACE("ips_removeq_wait_head", 1);
2897
2898 item = queue->head;
2899
2900 if (!item) {
2901 return (NULL);
2902 }
2903
2904 queue->head = (struct scsi_cmnd *) item->host_scribble;
2905 item->host_scribble = NULL;
2906
2907 if (queue->tail == item)
2908 queue->tail = NULL;
2909
2910 queue->count--;
2911
2912 return (item);
2913 }
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926 static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue,
2927 struct scsi_cmnd *item)
2928 {
2929 struct scsi_cmnd *p;
2930
2931 METHOD_TRACE("ips_removeq_wait", 1);
2932
2933 if (!item)
2934 return (NULL);
2935
2936 if (item == queue->head) {
2937 return (ips_removeq_wait_head(queue));
2938 }
2939
2940 p = queue->head;
2941
2942 while ((p) && (item != (struct scsi_cmnd *) p->host_scribble))
2943 p = (struct scsi_cmnd *) p->host_scribble;
2944
2945 if (p) {
2946
2947 p->host_scribble = item->host_scribble;
2948
2949 if (!item->host_scribble)
2950 queue->tail = p;
2951
2952 item->host_scribble = NULL;
2953 queue->count--;
2954
2955 return (item);
2956 }
2957
2958 return (NULL);
2959 }
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972 static void
2973 ips_putq_copp_tail(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
2974 {
2975 METHOD_TRACE("ips_putq_copp_tail", 1);
2976
2977 if (!item)
2978 return;
2979
2980 item->next = NULL;
2981
2982 if (queue->tail)
2983 queue->tail->next = item;
2984
2985 queue->tail = item;
2986
2987 if (!queue->head)
2988 queue->head = item;
2989
2990 queue->count++;
2991 }
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004 static ips_copp_wait_item_t *
3005 ips_removeq_copp_head(ips_copp_queue_t * queue)
3006 {
3007 ips_copp_wait_item_t *item;
3008
3009 METHOD_TRACE("ips_removeq_copp_head", 1);
3010
3011 item = queue->head;
3012
3013 if (!item) {
3014 return (NULL);
3015 }
3016
3017 queue->head = item->next;
3018 item->next = NULL;
3019
3020 if (queue->tail == item)
3021 queue->tail = NULL;
3022
3023 queue->count--;
3024
3025 return (item);
3026 }
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039 static ips_copp_wait_item_t *
3040 ips_removeq_copp(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
3041 {
3042 ips_copp_wait_item_t *p;
3043
3044 METHOD_TRACE("ips_removeq_copp", 1);
3045
3046 if (!item)
3047 return (NULL);
3048
3049 if (item == queue->head) {
3050 return (ips_removeq_copp_head(queue));
3051 }
3052
3053 p = queue->head;
3054
3055 while ((p) && (item != p->next))
3056 p = p->next;
3057
3058 if (p) {
3059
3060 p->next = item->next;
3061
3062 if (!item->next)
3063 queue->tail = p;
3064
3065 item->next = NULL;
3066 queue->count--;
3067
3068 return (item);
3069 }
3070
3071 return (NULL);
3072 }
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
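/*
 * ipsintr_blocking / ipsintr_done - completion callbacks.  The blocking
 * variant is used for polled internal commands: it frees the SCB and
 * clears ha->waitflag so ips_wait() sees the command as finished.  The
 * normal variant validates the SCB and hands it to ips_done().
 */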
3083 static void
3084 ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb)
3085 {
3086 METHOD_TRACE("ipsintr_blocking", 2);
3087
3088 ips_freescb(ha, scb);
3089 if ((ha->waitflag == TRUE) && (ha->cmd_in_progress == scb->cdb[0])) {
3090 ha->waitflag = FALSE;
3091
3092 return;
3093 }
3094 }
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105 static void
3106 ipsintr_done(ips_ha_t * ha, ips_scb_t * scb)
3107 {
3108 METHOD_TRACE("ipsintr_done", 2);
3109
3110 if (!scb) {
3111 IPS_PRINTK(KERN_WARNING, ha->pcidev,
3112 "Spurious interrupt; scb NULL.\n");
3113
3114 return;
3115 }
3116
3117 if (scb->scsi_cmd == NULL) {
3118
3119 IPS_PRINTK(KERN_WARNING, ha->pcidev,
3120 "Spurious interrupt; scsi_cmd not set.\n");
3121
3122 return;
3123 }
3124
3125 ips_done(ha, scb);
3126 }
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
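/*
 * ips_done - finish a completed command.  Passthru commands are cleaned
 * up and the outstanding ioctl count dropped.  If a command was broken up
 * because it needed more scatter-gather entries than a single SCB can
 * carry (scb->breakup / scb->sg_break), the remaining segments are loaded
 * into the SCB and it is re-issued instead of being completed.  Otherwise
 * the DCDB-active bit for the target is cleared, the midlayer is notified
 * through scsi_done, and the SCB goes back to the free list.
 */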
3137 static void
3138 ips_done(ips_ha_t * ha, ips_scb_t * scb)
3139 {
3140 int ret;
3141
3142 METHOD_TRACE("ips_done", 1);
3143
3144 if (!scb)
3145 return;
3146
3147 if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) {
3148 ips_cleanup_passthru(ha, scb);
3149 ha->num_ioctl--;
3150 } else {
3151
3152
3153
3154
3155
3156 if ((scb->breakup) || (scb->sg_break)) {
3157 struct scatterlist *sg;
3158 int i, sg_dma_index, ips_sg_index = 0;
3159
3160
3161 scb->data_len = 0;
3162
3163 sg = scsi_sglist(scb->scsi_cmd);
3164
3165
3166 sg_dma_index = scb->breakup;
3167 for (i = 0; i < scb->breakup; i++)
3168 sg = sg_next(sg);
3169
3170
3171 ips_fill_scb_sg_single(ha,
3172 sg_dma_address(sg),
3173 scb, ips_sg_index++,
3174 sg_dma_len(sg));
3175
3176 for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
3177 sg_dma_index++, sg = sg_next(sg)) {
3178 if (ips_fill_scb_sg_single
3179 (ha,
3180 sg_dma_address(sg),
3181 scb, ips_sg_index++,
3182 sg_dma_len(sg)) < 0)
3183 break;
3184 }
3185
3186 scb->dcdb.transfer_length = scb->data_len;
3187 scb->dcdb.cmd_attribute |=
3188 ips_command_direction[scb->scsi_cmd->cmnd[0]];
3189
3190 if (!(scb->dcdb.cmd_attribute & 0x3))
3191 scb->dcdb.transfer_length = 0;
3192
3193 if (scb->data_len >= IPS_MAX_XFER) {
3194 scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
3195 scb->dcdb.transfer_length = 0;
3196 }
3197
3198 ret = ips_send_cmd(ha, scb);
3199
3200 switch (ret) {
3201 case IPS_FAILURE:
3202 if (scb->scsi_cmd) {
3203 scb->scsi_cmd->result = DID_ERROR << 16;
3204 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
3205 }
3206
3207 ips_freescb(ha, scb);
3208 break;
3209 case IPS_SUCCESS_IMM:
3210 if (scb->scsi_cmd) {
3211 scb->scsi_cmd->result = DID_ERROR << 16;
3212 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
3213 }
3214
3215 ips_freescb(ha, scb);
3216 break;
3217 default:
3218 break;
3219 }
3220
3221 return;
3222 }
3223 }
3224
3225 if (scb->bus) {
3226 ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id);
3227 }
3228
3229 scb->scsi_cmd->scsi_done(scb->scsi_cmd);
3230
3231 ips_freescb(ha, scb);
3232 }
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
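/*
 * ips_map_status - translate the controller's basic/extended status for a
 * command that did not complete cleanly into a SCSI midlayer result.
 * Physical (DCDB) command errors get special handling: selection timeouts
 * become DID_NO_CONNECT, short transfers and recovered errors are passed
 * up as DID_OK, and check conditions copy the sense data back and report
 * CHECK CONDITION with DID_OK so the midlayer can act on the sense.
 */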
3243 static int
3244 ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
3245 {
3246 int errcode;
3247 int device_error;
3248 uint32_t transfer_len;
3249 IPS_DCDB_TABLE_TAPE *tapeDCDB;
3250 IPS_SCSI_INQ_DATA inquiryData;
3251
3252 METHOD_TRACE("ips_map_status", 1);
3253
3254 if (scb->bus) {
3255 DEBUG_VAR(2,
3256 "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x",
3257 ips_name, ha->host_num,
3258 scb->scsi_cmd->device->channel,
3259 scb->scsi_cmd->device->id, scb->scsi_cmd->device->lun,
3260 scb->basic_status, scb->extended_status,
3261 scb->extended_status ==
3262 IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0,
3263 scb->extended_status ==
3264 IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0,
3265 scb->extended_status ==
3266 IPS_ERR_CKCOND ? scb->dcdb.sense_info[13] : 0);
3267 }
3268
3269
3270 errcode = DID_ERROR;
3271 device_error = 0;
3272
3273 switch (scb->basic_status & IPS_GSC_STATUS_MASK) {
3274 case IPS_CMD_TIMEOUT:
3275 errcode = DID_TIME_OUT;
3276 break;
3277
3278 case IPS_INVAL_OPCO:
3279 case IPS_INVAL_CMD_BLK:
3280 case IPS_INVAL_PARM_BLK:
3281 case IPS_LD_ERROR:
3282 case IPS_CMD_CMPLT_WERROR:
3283 break;
3284
3285 case IPS_PHYS_DRV_ERROR:
3286 switch (scb->extended_status) {
3287 case IPS_ERR_SEL_TO:
3288 if (scb->bus)
3289 errcode = DID_NO_CONNECT;
3290
3291 break;
3292
3293 case IPS_ERR_OU_RUN:
3294 if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) ||
3295 (scb->cmd.dcdb.op_code ==
3296 IPS_CMD_EXTENDED_DCDB_SG)) {
3297 tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3298 transfer_len = tapeDCDB->transfer_length;
3299 } else {
3300 transfer_len =
3301 (uint32_t) scb->dcdb.transfer_length;
3302 }
3303
3304 if ((scb->bus) && (transfer_len < scb->data_len)) {
3305
3306 errcode = DID_OK;
3307
3308
3309 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3310 ips_scmd_buf_read(scb->scsi_cmd,
3311 &inquiryData, sizeof (inquiryData));
3312 if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) {
3313 errcode = DID_TIME_OUT;
3314 break;
3315 }
3316 }
3317 } else
3318 errcode = DID_ERROR;
3319
3320 break;
3321
3322 case IPS_ERR_RECOVERY:
3323
3324 if (scb->bus)
3325 errcode = DID_OK;
3326
3327 break;
3328
3329 case IPS_ERR_HOST_RESET:
3330 case IPS_ERR_DEV_RESET:
3331 errcode = DID_RESET;
3332 break;
3333
3334 case IPS_ERR_CKCOND:
3335 if (scb->bus) {
3336 if ((scb->cmd.dcdb.op_code ==
3337 IPS_CMD_EXTENDED_DCDB)
3338 || (scb->cmd.dcdb.op_code ==
3339 IPS_CMD_EXTENDED_DCDB_SG)) {
3340 tapeDCDB =
3341 (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3342 memcpy(scb->scsi_cmd->sense_buffer,
3343 tapeDCDB->sense_info,
3344 SCSI_SENSE_BUFFERSIZE);
3345 } else {
3346 memcpy(scb->scsi_cmd->sense_buffer,
3347 scb->dcdb.sense_info,
3348 SCSI_SENSE_BUFFERSIZE);
3349 }
3350 device_error = 2;
3351 }
3352
3353 errcode = DID_OK;
3354
3355 break;
3356
3357 default:
3358 errcode = DID_ERROR;
3359 break;
3360
3361 }
3362 }
3363
3364 scb->scsi_cmd->result = device_error | (errcode << 16);
3365
3366 return (1);
3367 }
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
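/*
 * ips_send_wait - issue an internal command and, unless intr == IPS_FFDC,
 * poll for its completion with ips_wait().  The SCB callback is set to
 * ipsintr_blocking so that completion clears ha->waitflag.
 */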
3380 static int
3381 ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
3382 {
3383 int ret;
3384
3385 METHOD_TRACE("ips_send_wait", 1);
3386
3387 if (intr != IPS_FFDC) {
3388 ha->waitflag = TRUE;
3389 ha->cmd_in_progress = scb->cdb[0];
3390 }
3391 scb->callback = ipsintr_blocking;
3392 ret = ips_send_cmd(ha, scb);
3393
3394 if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM))
3395 return (ret);
3396
3397 if (intr != IPS_FFDC)
3398 ret = ips_wait(ha, timeout, intr);
3399
3400 return (ret);
3401 }
3402
3403
3404
3405
3406
3407
3408
3409
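/*
 * ips_scmd_buf_write / ips_scmd_buf_read - copy a driver buffer into or
 * out of a scsi_cmnd's scatter-gather list, with local interrupts
 * disabled around the copy.
 */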
3410 static void
3411 ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
3412 {
3413 unsigned long flags;
3414
3415 local_irq_save(flags);
3416 scsi_sg_copy_from_buffer(scmd, data, count);
3417 local_irq_restore(flags);
3418 }
3419
3420
3421
3422
3423
3424
3425
3426
3427 static void
3428 ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
3429 {
3430 unsigned long flags;
3431
3432 local_irq_save(flags);
3433 scsi_sg_copy_to_buffer(scmd, data, count);
3434 local_irq_restore(flags);
3435 }
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
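/*
 * ips_send_cmd - build the adapter command for an SCB and hand it to the
 * hardware-specific issue routine.  Commands for logical drives (bus 0)
 * are translated: TEST UNIT READY, RESERVE, RELEASE, REQUEST SENSE and
 * similar commands are completed immediately (IPS_SUCCESS_IMM); INQUIRY,
 * MODE SENSE and READ CAPACITY become controller enquiry or
 * get-logical-drive-info commands; and READ/WRITE(6/10) become
 * IPS_CMD_READ/WRITE, with or without scatter-gather.  Commands for the
 * physical buses are wrapped in a DCDB (the extended "tape" layout when
 * the subsystem reports support), with a timeout attribute derived from
 * the midlayer request timeout.  Unknown opcodes are failed with
 * ILLEGAL REQUEST sense data.
 */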
3446 static int
3447 ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
3448 {
3449 int ret;
3450 char *sp;
3451 int device_error;
3452 IPS_DCDB_TABLE_TAPE *tapeDCDB;
3453 int TimeOut;
3454
3455 METHOD_TRACE("ips_send_cmd", 1);
3456
3457 ret = IPS_SUCCESS;
3458
3459 if (!scb->scsi_cmd) {
3460
3461
3462 if (scb->bus > 0) {
3463
3464
3465 if ((ha->waitflag == TRUE) &&
3466 (ha->cmd_in_progress == scb->cdb[0])) {
3467 ha->waitflag = FALSE;
3468 }
3469
3470 return (1);
3471 }
3472 } else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) {
3473
3474 ret = IPS_SUCCESS_IMM;
3475
3476 switch (scb->scsi_cmd->cmnd[0]) {
3477 case ALLOW_MEDIUM_REMOVAL:
3478 case REZERO_UNIT:
3479 case ERASE:
3480 case WRITE_FILEMARKS:
3481 case SPACE:
3482 scb->scsi_cmd->result = DID_ERROR << 16;
3483 break;
3484
3485 case START_STOP:
3486 scb->scsi_cmd->result = DID_OK << 16;
3487 break;
3488
3489 case TEST_UNIT_READY:
3490 case INQUIRY:
3491 if (scb->target_id == IPS_ADAPTER_ID) {
3492
3493
3494
3495
3496 if (scb->scsi_cmd->cmnd[0] == TEST_UNIT_READY)
3497 scb->scsi_cmd->result = DID_OK << 16;
3498
3499 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3500 IPS_SCSI_INQ_DATA inquiry;
3501
3502 memset(&inquiry, 0,
3503 sizeof (IPS_SCSI_INQ_DATA));
3504
3505 inquiry.DeviceType =
3506 IPS_SCSI_INQ_TYPE_PROCESSOR;
3507 inquiry.DeviceTypeQualifier =
3508 IPS_SCSI_INQ_LU_CONNECTED;
3509 inquiry.Version = IPS_SCSI_INQ_REV2;
3510 inquiry.ResponseDataFormat =
3511 IPS_SCSI_INQ_RD_REV2;
3512 inquiry.AdditionalLength = 31;
3513 inquiry.Flags[0] =
3514 IPS_SCSI_INQ_Address16;
3515 inquiry.Flags[1] =
3516 IPS_SCSI_INQ_WBus16 |
3517 IPS_SCSI_INQ_Sync;
3518 strncpy(inquiry.VendorId, "IBM ",
3519 8);
3520 strncpy(inquiry.ProductId,
3521 "SERVERAID ", 16);
3522 strncpy(inquiry.ProductRevisionLevel,
3523 "1.00", 4);
3524
3525 ips_scmd_buf_write(scb->scsi_cmd,
3526 &inquiry,
3527 sizeof (inquiry));
3528
3529 scb->scsi_cmd->result = DID_OK << 16;
3530 }
3531 } else {
3532 scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
3533 scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
3534 scb->cmd.logical_info.reserved = 0;
3535 scb->cmd.logical_info.reserved2 = 0;
3536 scb->data_len = sizeof (IPS_LD_INFO);
3537 scb->data_busaddr = ha->logical_drive_info_dma_addr;
3538 scb->flags = 0;
3539 scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
3540 ret = IPS_SUCCESS;
3541 }
3542
3543 break;
3544
3545 case REQUEST_SENSE:
3546 ips_reqsen(ha, scb);
3547 scb->scsi_cmd->result = DID_OK << 16;
3548 break;
3549
3550 case READ_6:
3551 case WRITE_6:
3552 if (!scb->sg_len) {
3553 scb->cmd.basic_io.op_code =
3554 (scb->scsi_cmd->cmnd[0] ==
3555 READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE;
3556 scb->cmd.basic_io.enhanced_sg = 0;
3557 scb->cmd.basic_io.sg_addr =
3558 cpu_to_le32(scb->data_busaddr);
3559 } else {
3560 scb->cmd.basic_io.op_code =
3561 (scb->scsi_cmd->cmnd[0] ==
3562 READ_6) ? IPS_CMD_READ_SG :
3563 IPS_CMD_WRITE_SG;
3564 scb->cmd.basic_io.enhanced_sg =
3565 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3566 scb->cmd.basic_io.sg_addr =
3567 cpu_to_le32(scb->sg_busaddr);
3568 }
3569
3570 scb->cmd.basic_io.segment_4G = 0;
3571 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3572 scb->cmd.basic_io.log_drv = scb->target_id;
3573 scb->cmd.basic_io.sg_count = scb->sg_len;
3574
3575 if (scb->cmd.basic_io.lba)
3576 le32_add_cpu(&scb->cmd.basic_io.lba,
3577 le16_to_cpu(scb->cmd.basic_io.
3578 sector_count));
3579 else
				scb->cmd.basic_io.lba =
					(((scb->scsi_cmd->cmnd[1] & 0x1f) << 16) |
					 (scb->scsi_cmd->cmnd[2] << 8) |
					 (scb->scsi_cmd->cmnd[3]));
3585
3586 scb->cmd.basic_io.sector_count =
3587 cpu_to_le16(scb->data_len / IPS_BLKSIZE);
3588
3589 if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0)
3590 scb->cmd.basic_io.sector_count =
3591 cpu_to_le16(256);
3592
3593 ret = IPS_SUCCESS;
3594 break;
3595
3596 case READ_10:
3597 case WRITE_10:
3598 if (!scb->sg_len) {
3599 scb->cmd.basic_io.op_code =
3600 (scb->scsi_cmd->cmnd[0] ==
3601 READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE;
3602 scb->cmd.basic_io.enhanced_sg = 0;
3603 scb->cmd.basic_io.sg_addr =
3604 cpu_to_le32(scb->data_busaddr);
3605 } else {
3606 scb->cmd.basic_io.op_code =
3607 (scb->scsi_cmd->cmnd[0] ==
3608 READ_10) ? IPS_CMD_READ_SG :
3609 IPS_CMD_WRITE_SG;
3610 scb->cmd.basic_io.enhanced_sg =
3611 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3612 scb->cmd.basic_io.sg_addr =
3613 cpu_to_le32(scb->sg_busaddr);
3614 }
3615
3616 scb->cmd.basic_io.segment_4G = 0;
3617 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3618 scb->cmd.basic_io.log_drv = scb->target_id;
3619 scb->cmd.basic_io.sg_count = scb->sg_len;
3620
3621 if (scb->cmd.basic_io.lba)
3622 le32_add_cpu(&scb->cmd.basic_io.lba,
3623 le16_to_cpu(scb->cmd.basic_io.
3624 sector_count));
3625 else
				scb->cmd.basic_io.lba =
					((scb->scsi_cmd->cmnd[2] << 24) |
					 (scb->scsi_cmd->cmnd[3] << 16) |
					 (scb->scsi_cmd->cmnd[4] << 8) |
					 scb->scsi_cmd->cmnd[5]);
3633
3634 scb->cmd.basic_io.sector_count =
3635 cpu_to_le16(scb->data_len / IPS_BLKSIZE);
3636
3637 if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0) {
3638
3639
3640
3641
3642
3643 scb->scsi_cmd->result = DID_OK << 16;
3644 } else
3645 ret = IPS_SUCCESS;
3646
3647 break;
3648
3649 case RESERVE:
3650 case RELEASE:
3651 scb->scsi_cmd->result = DID_OK << 16;
3652 break;
3653
3654 case MODE_SENSE:
3655 scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
3656 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
3657 scb->cmd.basic_io.segment_4G = 0;
3658 scb->cmd.basic_io.enhanced_sg = 0;
3659 scb->data_len = sizeof (*ha->enq);
3660 scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
3661 ret = IPS_SUCCESS;
3662 break;
3663
3664 case READ_CAPACITY:
3665 scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
3666 scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
3667 scb->cmd.logical_info.reserved = 0;
3668 scb->cmd.logical_info.reserved2 = 0;
3669 scb->cmd.logical_info.reserved3 = 0;
3670 scb->data_len = sizeof (IPS_LD_INFO);
3671 scb->data_busaddr = ha->logical_drive_info_dma_addr;
3672 scb->flags = 0;
3673 scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
3674 ret = IPS_SUCCESS;
3675 break;
3676
3677 case SEND_DIAGNOSTIC:
3678 case REASSIGN_BLOCKS:
3679 case FORMAT_UNIT:
3680 case SEEK_10:
3681 case VERIFY:
3682 case READ_DEFECT_DATA:
3683 case READ_BUFFER:
3684 case WRITE_BUFFER:
3685 scb->scsi_cmd->result = DID_OK << 16;
3686 break;
3687
3688 default:
3689
3690
3691
3692 sp = (char *) scb->scsi_cmd->sense_buffer;
3693
3694 sp[0] = 0x70;
3695 sp[2] = ILLEGAL_REQUEST;
3696 sp[7] = 0x0A;
3697 sp[12] = 0x20;
3698 sp[13] = 0x00;
3699
3700 device_error = 2;
3701 scb->scsi_cmd->result = device_error | (DID_OK << 16);
3702 break;
3703 }
3704 }
3705
3706 if (ret == IPS_SUCCESS_IMM)
3707 return (ret);
3708
3709
3710 if (scb->bus > 0) {
3711
3712
3713
3714 if (ha->conf->dev[scb->bus - 1][scb->target_id].ucState == 0) {
3715 scb->scsi_cmd->result = DID_NO_CONNECT << 16;
3716 return (IPS_SUCCESS_IMM);
3717 }
3718
3719 ha->dcdb_active[scb->bus - 1] |= (1 << scb->target_id);
3720 scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb);
		scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
					(unsigned long) &scb->dcdb -
					(unsigned long) scb);
3725 scb->cmd.dcdb.reserved = 0;
3726 scb->cmd.dcdb.reserved2 = 0;
3727 scb->cmd.dcdb.reserved3 = 0;
3728 scb->cmd.dcdb.segment_4G = 0;
3729 scb->cmd.dcdb.enhanced_sg = 0;
3730
3731 TimeOut = scb->scsi_cmd->request->timeout;
3732
3733 if (ha->subsys->param[4] & 0x00100000) {
3734 if (!scb->sg_len) {
3735 scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB;
3736 } else {
3737 scb->cmd.dcdb.op_code =
3738 IPS_CMD_EXTENDED_DCDB_SG;
3739 scb->cmd.dcdb.enhanced_sg =
3740 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3741 }
3742
3743 tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
3744 tapeDCDB->device_address =
3745 ((scb->bus - 1) << 4) | scb->target_id;
3746 tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED;
3747 tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K;
3748
3749 if (TimeOut) {
3750 if (TimeOut < (10 * HZ))
3751 tapeDCDB->cmd_attribute |= IPS_TIMEOUT10;
3752 else if (TimeOut < (60 * HZ))
3753 tapeDCDB->cmd_attribute |= IPS_TIMEOUT60;
3754 else if (TimeOut < (1200 * HZ))
3755 tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M;
3756 }
3757
3758 tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len;
3759 tapeDCDB->reserved_for_LUN = 0;
3760 tapeDCDB->transfer_length = scb->data_len;
3761 if (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)
3762 tapeDCDB->buffer_pointer =
3763 cpu_to_le32(scb->sg_busaddr);
3764 else
3765 tapeDCDB->buffer_pointer =
3766 cpu_to_le32(scb->data_busaddr);
3767 tapeDCDB->sg_count = scb->sg_len;
3768 tapeDCDB->sense_length = sizeof (tapeDCDB->sense_info);
3769 tapeDCDB->scsi_status = 0;
3770 tapeDCDB->reserved = 0;
3771 memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd,
3772 scb->scsi_cmd->cmd_len);
3773 } else {
3774 if (!scb->sg_len) {
3775 scb->cmd.dcdb.op_code = IPS_CMD_DCDB;
3776 } else {
3777 scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG;
3778 scb->cmd.dcdb.enhanced_sg =
3779 IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
3780 }
3781
3782 scb->dcdb.device_address =
3783 ((scb->bus - 1) << 4) | scb->target_id;
3784 scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED;
3785
3786 if (TimeOut) {
3787 if (TimeOut < (10 * HZ))
3788 scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;
3789 else if (TimeOut < (60 * HZ))
3790 scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;
3791 else if (TimeOut < (1200 * HZ))
3792 scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;
3793 }
3794
3795 scb->dcdb.transfer_length = scb->data_len;
3796 if (scb->dcdb.cmd_attribute & IPS_TRANSFER64K)
3797 scb->dcdb.transfer_length = 0;
3798 if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB_SG)
3799 scb->dcdb.buffer_pointer =
3800 cpu_to_le32(scb->sg_busaddr);
3801 else
3802 scb->dcdb.buffer_pointer =
3803 cpu_to_le32(scb->data_busaddr);
3804 scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len;
3805 scb->dcdb.sense_length = sizeof (scb->dcdb.sense_info);
3806 scb->dcdb.sg_count = scb->sg_len;
3807 scb->dcdb.reserved = 0;
3808 memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd,
3809 scb->scsi_cmd->cmd_len);
3810 scb->dcdb.scsi_status = 0;
3811 scb->dcdb.reserved2[0] = 0;
3812 scb->dcdb.reserved2[1] = 0;
3813 scb->dcdb.reserved2[2] = 0;
3814 }
3815 }
3816
3817 return ((*ha->func.issue) (ha, scb));
3818 }
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
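/*
 * ips_chkstatus - process one completion status collected by the
 * interrupt handler.  The SCB is looked up by command id and removed from
 * the active list; for successful logical-drive commands the emulated
 * responses (inquiry, read capacity, mode sense, ...) are generated here,
 * while errors are mapped to SCSI results through ips_map_status().
 */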
3829 static void
3830 ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
3831 {
3832 ips_scb_t *scb;
3833 ips_stat_t *sp;
3834 uint8_t basic_status;
3835 uint8_t ext_status;
3836 int errcode;
3837 IPS_SCSI_INQ_DATA inquiryData;
3838
3839 METHOD_TRACE("ips_chkstatus", 1);
3840
3841 scb = &ha->scbs[pstatus->fields.command_id];
3842 scb->basic_status = basic_status =
3843 pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK;
3844 scb->extended_status = ext_status = pstatus->fields.extended_status;
3845
3846 sp = &ha->sp;
3847 sp->residue_len = 0;
3848 sp->scb_addr = (void *) scb;
3849
3850
3851 ips_removeq_scb(&ha->scb_activelist, scb);
3852
3853 if (!scb->scsi_cmd)
3854
3855 return;
3856
3857 DEBUG_VAR(2, "(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)",
3858 ips_name,
3859 ha->host_num,
3860 scb->cdb[0],
3861 scb->cmd.basic_io.command_id,
3862 scb->bus, scb->target_id, scb->lun);
3863
3864 if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd)))
3865
3866 return;
3867
3868 errcode = DID_OK;
3869
3870 if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) ||
3871 ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) {
3872
3873 if (scb->bus == 0) {
3874 if ((basic_status & IPS_GSC_STATUS_MASK) ==
3875 IPS_CMD_RECOVERED_ERROR) {
3876 DEBUG_VAR(1,
3877 "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
3878 ips_name, ha->host_num,
3879 scb->cmd.basic_io.op_code,
3880 basic_status, ext_status);
3881 }
3882
3883 switch (scb->scsi_cmd->cmnd[0]) {
3884 case ALLOW_MEDIUM_REMOVAL:
3885 case REZERO_UNIT:
3886 case ERASE:
3887 case WRITE_FILEMARKS:
3888 case SPACE:
3889 errcode = DID_ERROR;
3890 break;
3891
3892 case START_STOP:
3893 break;
3894
3895 case TEST_UNIT_READY:
3896 if (!ips_online(ha, scb)) {
3897 errcode = DID_TIME_OUT;
3898 }
3899 break;
3900
3901 case INQUIRY:
3902 if (ips_online(ha, scb)) {
3903 ips_inquiry(ha, scb);
3904 } else {
3905 errcode = DID_TIME_OUT;
3906 }
3907 break;
3908
3909 case REQUEST_SENSE:
3910 ips_reqsen(ha, scb);
3911 break;
3912
3913 case READ_6:
3914 case WRITE_6:
3915 case READ_10:
3916 case WRITE_10:
3917 case RESERVE:
3918 case RELEASE:
3919 break;
3920
3921 case MODE_SENSE:
3922 if (!ips_online(ha, scb)
3923 || !ips_msense(ha, scb)) {
3924 errcode = DID_ERROR;
3925 }
3926 break;
3927
3928 case READ_CAPACITY:
3929 if (ips_online(ha, scb))
3930 ips_rdcap(ha, scb);
3931 else {
3932 errcode = DID_TIME_OUT;
3933 }
3934 break;
3935
3936 case SEND_DIAGNOSTIC:
3937 case REASSIGN_BLOCKS:
3938 break;
3939
3940 case FORMAT_UNIT:
3941 errcode = DID_ERROR;
3942 break;
3943
3944 case SEEK_10:
3945 case VERIFY:
3946 case READ_DEFECT_DATA:
3947 case READ_BUFFER:
3948 case WRITE_BUFFER:
3949 break;
3950
3951 default:
3952 errcode = DID_ERROR;
3953 }
3954
3955 scb->scsi_cmd->result = errcode << 16;
3956 } else {
3957
3958 if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
3959 ips_scmd_buf_read(scb->scsi_cmd,
3960 &inquiryData, sizeof (inquiryData));
3961 if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK)
3962 scb->scsi_cmd->result = DID_TIME_OUT << 16;
3963 }
3964 }
3965 } else {
3966 if (scb->bus == 0) {
3967 DEBUG_VAR(1,
3968 "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
3969 ips_name, ha->host_num,
3970 scb->cmd.basic_io.op_code, basic_status,
3971 ext_status);
3972 }
3973
3974 ips_map_status(ha, scb, sp);
3975 }
3976 }
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
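/*
 * ips_online - return 1 if the logical drive addressed by the SCB is in a
 * usable state (not offline, free, CRS or SYS) according to the latest
 * logical drive info, 0 otherwise.
 */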
3987 static int
3988 ips_online(ips_ha_t * ha, ips_scb_t * scb)
3989 {
3990 METHOD_TRACE("ips_online", 1);
3991
3992 if (scb->target_id >= IPS_MAX_LD)
3993 return (0);
3994
3995 if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) {
3996 memset(ha->logical_drive_info, 0, sizeof (IPS_LD_INFO));
3997 return (0);
3998 }
3999
4000 if (ha->logical_drive_info->drive_info[scb->target_id].state !=
4001 IPS_LD_OFFLINE
4002 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4003 IPS_LD_FREE
4004 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4005 IPS_LD_CRS
4006 && ha->logical_drive_info->drive_info[scb->target_id].state !=
4007 IPS_LD_SYS)
4008 return (1);
4009 else
4010 return (0);
4011 }
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
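/*
 * ips_inquiry, ips_rdcap, ips_msense and ips_reqsen below build the
 * emulated INQUIRY, READ CAPACITY, MODE SENSE and REQUEST SENSE responses
 * for logical drives and copy them into the command buffer.  MODE SENSE
 * fabricates a disk geometry from the drive size (IPS_NORM_HEADS/SECTORS
 * for large drives unless the compatibility bit in ucMiscFlag is set,
 * IPS_COMP_* otherwise) and supports pages 3, 4 and 8 only.
 */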
4022 static int
4023 ips_inquiry(ips_ha_t * ha, ips_scb_t * scb)
4024 {
4025 IPS_SCSI_INQ_DATA inquiry;
4026
4027 METHOD_TRACE("ips_inquiry", 1);
4028
4029 memset(&inquiry, 0, sizeof (IPS_SCSI_INQ_DATA));
4030
4031 inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD;
4032 inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED;
4033 inquiry.Version = IPS_SCSI_INQ_REV2;
4034 inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2;
4035 inquiry.AdditionalLength = 31;
4036 inquiry.Flags[0] = IPS_SCSI_INQ_Address16;
4037 inquiry.Flags[1] =
4038 IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync | IPS_SCSI_INQ_CmdQue;
4039 strncpy(inquiry.VendorId, "IBM ", 8);
4040 strncpy(inquiry.ProductId, "SERVERAID ", 16);
4041 strncpy(inquiry.ProductRevisionLevel, "1.00", 4);
4042
4043 ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof (inquiry));
4044
4045 return (1);
4046 }
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057 static int
4058 ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
4059 {
4060 IPS_SCSI_CAPACITY cap;
4061
4062 METHOD_TRACE("ips_rdcap", 1);
4063
4064 if (scsi_bufflen(scb->scsi_cmd) < 8)
4065 return (0);
4066
	cap.lba = cpu_to_be32(le32_to_cpu(ha->logical_drive_info->
				drive_info[scb->target_id].sector_count) - 1);
4071 cap.len = cpu_to_be32((uint32_t) IPS_BLKSIZE);
4072
4073 ips_scmd_buf_write(scb->scsi_cmd, &cap, sizeof (cap));
4074
4075 return (1);
4076 }
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087 static int
4088 ips_msense(ips_ha_t * ha, ips_scb_t * scb)
4089 {
4090 uint16_t heads;
4091 uint16_t sectors;
4092 uint32_t cylinders;
4093 IPS_SCSI_MODE_PAGE_DATA mdata;
4094
4095 METHOD_TRACE("ips_msense", 1);
4096
4097 if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 &&
4098 (ha->enq->ucMiscFlag & 0x8) == 0) {
4099 heads = IPS_NORM_HEADS;
4100 sectors = IPS_NORM_SECTORS;
4101 } else {
4102 heads = IPS_COMP_HEADS;
4103 sectors = IPS_COMP_SECTORS;
4104 }
4105
4106 cylinders =
4107 (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) -
4108 1) / (heads * sectors);
4109
4110 memset(&mdata, 0, sizeof (IPS_SCSI_MODE_PAGE_DATA));
4111
4112 mdata.hdr.BlockDescLength = 8;
4113
4114 switch (scb->scsi_cmd->cmnd[2] & 0x3f) {
4115 case 0x03:
4116 mdata.pdata.pg3.PageCode = 3;
4117 mdata.pdata.pg3.PageLength = sizeof (IPS_SCSI_MODE_PAGE3);
4118 mdata.hdr.DataLength =
4119 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength;
4120 mdata.pdata.pg3.TracksPerZone = 0;
4121 mdata.pdata.pg3.AltSectorsPerZone = 0;
4122 mdata.pdata.pg3.AltTracksPerZone = 0;
4123 mdata.pdata.pg3.AltTracksPerVolume = 0;
4124 mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors);
4125 mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE);
4126 mdata.pdata.pg3.Interleave = cpu_to_be16(1);
4127 mdata.pdata.pg3.TrackSkew = 0;
4128 mdata.pdata.pg3.CylinderSkew = 0;
4129 mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector;
4130 break;
4131
4132 case 0x4:
4133 mdata.pdata.pg4.PageCode = 4;
4134 mdata.pdata.pg4.PageLength = sizeof (IPS_SCSI_MODE_PAGE4);
4135 mdata.hdr.DataLength =
4136 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength;
4137 mdata.pdata.pg4.CylindersHigh =
4138 cpu_to_be16((cylinders >> 8) & 0xFFFF);
4139 mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF);
4140 mdata.pdata.pg4.Heads = heads;
4141 mdata.pdata.pg4.WritePrecompHigh = 0;
4142 mdata.pdata.pg4.WritePrecompLow = 0;
4143 mdata.pdata.pg4.ReducedWriteCurrentHigh = 0;
4144 mdata.pdata.pg4.ReducedWriteCurrentLow = 0;
4145 mdata.pdata.pg4.StepRate = cpu_to_be16(1);
4146 mdata.pdata.pg4.LandingZoneHigh = 0;
4147 mdata.pdata.pg4.LandingZoneLow = 0;
4148 mdata.pdata.pg4.flags = 0;
4149 mdata.pdata.pg4.RotationalOffset = 0;
4150 mdata.pdata.pg4.MediumRotationRate = 0;
4151 break;
4152 case 0x8:
4153 mdata.pdata.pg8.PageCode = 8;
4154 mdata.pdata.pg8.PageLength = sizeof (IPS_SCSI_MODE_PAGE8);
4155 mdata.hdr.DataLength =
4156 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg8.PageLength;
4157
4158 break;
4159
4160 default:
4161 return (0);
4162 }
4163
4164 ips_scmd_buf_write(scb->scsi_cmd, &mdata, sizeof (mdata));
4165
4166 return (1);
4167 }
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178 static int
4179 ips_reqsen(ips_ha_t * ha, ips_scb_t * scb)
4180 {
4181 IPS_SCSI_REQSEN reqsen;
4182
4183 METHOD_TRACE("ips_reqsen", 1);
4184
4185 memset(&reqsen, 0, sizeof (IPS_SCSI_REQSEN));
4186
4187 reqsen.ResponseCode =
4188 IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR;
4189 reqsen.AdditionalLength = 10;
4190 reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE;
4191 reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE;
4192
4193 ips_scmd_buf_write(scb->scsi_cmd, &reqsen, sizeof (reqsen));
4194
4195 return (1);
4196 }
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
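/*
 * ips_free - release the per-adapter resources: the coherent enquiry,
 * adapter, logical-drive-info and ioctl buffers, the kmalloc'ed
 * conf/nvram/subsys structures, the SCB pool, and the memory-mapped
 * register window.
 */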
4207 static void
4208 ips_free(ips_ha_t * ha)
4209 {
4210
4211 METHOD_TRACE("ips_free", 1);
4212
4213 if (ha) {
4214 if (ha->enq) {
4215 dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ),
4216 ha->enq, ha->enq_busaddr);
4217 ha->enq = NULL;
4218 }
4219
4220 kfree(ha->conf);
4221 ha->conf = NULL;
4222
4223 if (ha->adapt) {
4224 dma_free_coherent(&ha->pcidev->dev,
4225 sizeof (IPS_ADAPTER) +
4226 sizeof (IPS_IO_CMD), ha->adapt,
4227 ha->adapt->hw_status_start);
4228 ha->adapt = NULL;
4229 }
4230
4231 if (ha->logical_drive_info) {
4232 dma_free_coherent(&ha->pcidev->dev,
4233 sizeof (IPS_LD_INFO),
4234 ha->logical_drive_info,
4235 ha->logical_drive_info_dma_addr);
4236 ha->logical_drive_info = NULL;
4237 }
4238
4239 kfree(ha->nvram);
4240 ha->nvram = NULL;
4241
4242 kfree(ha->subsys);
4243 ha->subsys = NULL;
4244
4245 if (ha->ioctl_data) {
4246 dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
4247 ha->ioctl_data, ha->ioctl_busaddr);
4248 ha->ioctl_data = NULL;
4249 ha->ioctl_datasize = 0;
4250 ha->ioctl_len = 0;
4251 }
4252 ips_deallocatescbs(ha, ha->max_cmds);
4253
4254
4255 if (ha->mem_ptr) {
4256 iounmap(ha->ioremap_ptr);
4257 ha->ioremap_ptr = NULL;
4258 ha->mem_ptr = NULL;
4259 }
4260
4261 ha->mem_addr = 0;
4262
4263 }
4264 }
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275 static int
4276 ips_deallocatescbs(ips_ha_t * ha, int cmds)
4277 {
4278 if (ha->scbs) {
4279 dma_free_coherent(&ha->pcidev->dev,
4280 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
4281 ha->scbs->sg_list.list,
4282 ha->scbs->sg_busaddr);
4283 dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds,
4284 ha->scbs, ha->scbs->scb_busaddr);
4285 ha->scbs = NULL;
4286 }
4287 return 1;
4288 }
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
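/*
 * ips_allocatescbs - allocate the SCB array and the per-SCB scatter-gather
 * lists from coherent DMA memory, record each SCB's bus addresses, and
 * chain all but the last SCB onto the free list (the last one is kept out
 * of the free list for internal commands).
 */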
4299 static int
4300 ips_allocatescbs(ips_ha_t * ha)
4301 {
4302 ips_scb_t *scb_p;
4303 IPS_SG_LIST ips_sg;
4304 int i;
4305 dma_addr_t command_dma, sg_dma;
4306
4307 METHOD_TRACE("ips_allocatescbs", 1);
4308
4309
4310 ha->scbs = dma_alloc_coherent(&ha->pcidev->dev,
4311 ha->max_cmds * sizeof (ips_scb_t),
4312 &command_dma, GFP_KERNEL);
4313 if (ha->scbs == NULL)
4314 return 0;
4315 ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev,
4316 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds,
4317 &sg_dma, GFP_KERNEL);
4318 if (ips_sg.list == NULL) {
4319 dma_free_coherent(&ha->pcidev->dev,
4320 ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
4321 command_dma);
4322 return 0;
4323 }
4324
4325 memset(ha->scbs, 0, ha->max_cmds * sizeof (ips_scb_t));
4326
4327 for (i = 0; i < ha->max_cmds; i++) {
4328 scb_p = &ha->scbs[i];
4329 scb_p->scb_busaddr = command_dma + sizeof (ips_scb_t) * i;
4330
4331 if (IPS_USE_ENH_SGLIST(ha)) {
4332 scb_p->sg_list.enh_list =
4333 ips_sg.enh_list + i * IPS_MAX_SG;
4334 scb_p->sg_busaddr =
4335 sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
4336 } else {
4337 scb_p->sg_list.std_list =
4338 ips_sg.std_list + i * IPS_MAX_SG;
4339 scb_p->sg_busaddr =
4340 sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
4341 }
4342
4343
4344 if (i < ha->max_cmds - 1) {
4345 scb_p->q_next = ha->scb_freelist;
4346 ha->scb_freelist = scb_p;
4347 }
4348 }
4349
4350
4351 return (1);
4352 }
4353
4354
4355
4356
4357
4358
4359
4360
4361
4362
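/*
 * ips_init_scb / ips_getscb / ips_freescb - SCB free-list management.
 * ips_init_scb wipes an SCB while preserving its bus addresses and
 * scatter-gather list pointer, and re-initializes ha->dummy, the
 * placeholder command block that the ccsar field points at; ips_getscb
 * pops an SCB off the free list; ips_freescb tears down any DMA mapping
 * and returns the SCB to the free list unless it carries the reserved
 * last command id.
 */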
4363 static void
4364 ips_init_scb(ips_ha_t * ha, ips_scb_t * scb)
4365 {
4366 IPS_SG_LIST sg_list;
4367 uint32_t cmd_busaddr, sg_busaddr;
4368 METHOD_TRACE("ips_init_scb", 1);
4369
4370 if (scb == NULL)
4371 return;
4372
4373 sg_list.list = scb->sg_list.list;
4374 cmd_busaddr = scb->scb_busaddr;
4375 sg_busaddr = scb->sg_busaddr;
4376
4377 memset(scb, 0, sizeof (ips_scb_t));
4378 memset(ha->dummy, 0, sizeof (IPS_IO_CMD));
4379
4380
4381 ha->dummy->op_code = 0xFF;
4382 ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start
4383 + sizeof (IPS_ADAPTER));
4384 ha->dummy->command_id = IPS_MAX_CMDS;
4385
4386
4387 scb->scb_busaddr = cmd_busaddr;
4388 scb->sg_busaddr = sg_busaddr;
4389 scb->sg_list.list = sg_list.list;
4390
4391
4392 scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE);
4393 scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start
4394 + sizeof (IPS_ADAPTER));
4395 }
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408 static ips_scb_t *
4409 ips_getscb(ips_ha_t * ha)
4410 {
4411 ips_scb_t *scb;
4412
4413 METHOD_TRACE("ips_getscb", 1);
4414
4415 if ((scb = ha->scb_freelist) == NULL) {
4416
4417 return (NULL);
4418 }
4419
4420 ha->scb_freelist = scb->q_next;
4421 scb->flags = 0;
4422 scb->q_next = NULL;
4423
4424 ips_init_scb(ha, scb);
4425
4426 return (scb);
4427 }
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440 static void
4441 ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
4442 {
4443
4444 METHOD_TRACE("ips_freescb", 1);
4445 if (scb->flags & IPS_SCB_MAP_SG)
4446 scsi_dma_unmap(scb->scsi_cmd);
4447 else if (scb->flags & IPS_SCB_MAP_SINGLE)
4448 dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr,
4449 scb->data_len, IPS_DMA_DIR(scb));
4450
4451
4452 if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
4453 scb->q_next = ha->scb_freelist;
4454 ha->scb_freelist = scb;
4455 }
4456 }
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466
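/*
 * ips_isinit_* - probe whether the adapter already looks initialized
 * (interrupts or bus-mastering enabled on copperhead; a non-zero POST
 * value and no pending doorbell bits on morpheus), presumably so that
 * detection can avoid resetting an adapter that is already up.  The
 * morpheus variant first flushes and resets the adapter if it finds an
 * interrupt still pending.
 */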
4467 static int
4468 ips_isinit_copperhead(ips_ha_t * ha)
4469 {
4470 uint8_t scpr;
4471 uint8_t isr;
4472
4473 METHOD_TRACE("ips_isinit_copperhead", 1);
4474
4475 isr = inb(ha->io_addr + IPS_REG_HISR);
4476 scpr = inb(ha->io_addr + IPS_REG_SCPR);
4477
4478 if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0))
4479 return (0);
4480 else
4481 return (1);
4482 }
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493 static int
4494 ips_isinit_copperhead_memio(ips_ha_t * ha)
4495 {
4496 uint8_t isr = 0;
4497 uint8_t scpr;
4498
4499 METHOD_TRACE("ips_is_init_copperhead_memio", 1);
4500
4501 isr = readb(ha->mem_ptr + IPS_REG_HISR);
4502 scpr = readb(ha->mem_ptr + IPS_REG_SCPR);
4503
4504 if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0))
4505 return (0);
4506 else
4507 return (1);
4508 }
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519 static int
4520 ips_isinit_morpheus(ips_ha_t * ha)
4521 {
4522 uint32_t post;
4523 uint32_t bits;
4524
4525 METHOD_TRACE("ips_is_init_morpheus", 1);
4526
4527 if (ips_isintr_morpheus(ha))
4528 ips_flush_and_reset(ha);
4529
4530 post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4531 bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4532
4533 if (post == 0)
4534 return (0);
4535 else if (bits & 0x3)
4536 return (0);
4537 else
4538 return (1);
4539 }
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
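/*
 * ips_flush_and_reset - used when a morpheus adapter is found with an
 * interrupt pending at init time: issue an IPS_CMD_FLUSH through a
 * temporary coherent SCB, poll for its completion for a bounded number of
 * 1 ms intervals (60 * IPS_ONE_SEC of them), then reset the adapter via
 * the chip-specific reset routine.
 */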
4551 static void
4552 ips_flush_and_reset(ips_ha_t *ha)
4553 {
4554 ips_scb_t *scb;
4555 int ret;
4556 int time;
4557 int done;
4558 dma_addr_t command_dma;
4559
4560
4561 scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t),
4562 &command_dma, GFP_KERNEL);
4563 if (scb) {
4564 memset(scb, 0, sizeof(ips_scb_t));
4565 ips_init_scb(ha, scb);
4566 scb->scb_busaddr = command_dma;
4567
4568 scb->timeout = ips_cmd_timeout;
4569 scb->cdb[0] = IPS_CMD_FLUSH;
4570
4571 scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
4572 scb->cmd.flush_cache.command_id = IPS_MAX_CMDS;
4573 scb->cmd.flush_cache.state = IPS_NORM_STATE;
4574 scb->cmd.flush_cache.reserved = 0;
4575 scb->cmd.flush_cache.reserved2 = 0;
4576 scb->cmd.flush_cache.reserved3 = 0;
4577 scb->cmd.flush_cache.reserved4 = 0;
4578
4579 ret = ips_send_cmd(ha, scb);
4580
4581 if (ret == IPS_SUCCESS) {
4582 time = 60 * IPS_ONE_SEC;
4583 done = 0;
4584
4585 while ((time > 0) && (!done)) {
4586 done = ips_poll_for_flush_complete(ha);
4587
4588 udelay(1000);
4589 time--;
4590 }
4591 }
4592 }
4593
4594
4595 (*ha->func.reset) (ha);
4596
	if (scb)	/* free only if the flush SCB was actually allocated above */
		dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb,
				  command_dma);
4598 return;
4599 }
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611 static int
4612 ips_poll_for_flush_complete(ips_ha_t * ha)
4613 {
4614 IPS_STATUS cstatus;
4615
4616 while (TRUE) {
4617 cstatus.value = (*ha->func.statupd) (ha);
4618
4619 if (cstatus.value == 0xffffffff)
4620 break;
4621
4622
4623 if (cstatus.fields.command_id == IPS_MAX_CMDS)
4624 return 1;
4625 }
4626
4627 return 0;
4628 }
4629
4630
4631
4632
4633
4634
4635
4636
4637
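/*
 * ips_enable_int_* - re-enable host interrupts (the EI bit in HISR on
 * copperhead, clearing bit 0x08 of the outbound interrupt mask on
 * morpheus).  The trailing read after each write forces the posted PCI
 * write to complete.
 */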
4638 static void
4639 ips_enable_int_copperhead(ips_ha_t * ha)
4640 {
4641 METHOD_TRACE("ips_enable_int_copperhead", 1);
4642
4643 outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR);	/* value first, then port, as in the memio variant */
4644 inb(ha->io_addr + IPS_REG_HISR);
4645 }
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655 static void
4656 ips_enable_int_copperhead_memio(ips_ha_t * ha)
4657 {
4658 METHOD_TRACE("ips_enable_int_copperhead_memio", 1);
4659
4660 writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
4661 readb(ha->mem_ptr + IPS_REG_HISR);
4662 }
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672 static void
4673 ips_enable_int_morpheus(ips_ha_t * ha)
4674 {
4675 uint32_t Oimr;
4676
4677 METHOD_TRACE("ips_enable_int_morpheus", 1);
4678
4679 Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4680 Oimr &= ~0x08;
4681 writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
4682 readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4683 }
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693
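/*
 * ips_init_copperhead / ips_init_copperhead_memio - post-reset
 * initialization via port I/O or the memory-mapped window respectively.
 * Both wait for the POST and config bytes (polling HISR for the GHI bit
 * with MDELAY(IPS_ONE_SEC) between tries), reject a POST status below
 * IPS_GOOD_POST_STATUS, wait for the command channel to go idle, then
 * enable bus-mastering and host interrupts.
 */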
4694 static int
4695 ips_init_copperhead(ips_ha_t * ha)
4696 {
4697 uint8_t Isr;
4698 uint8_t Cbsp;
4699 uint8_t PostByte[IPS_MAX_POST_BYTES];
4700 uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES];
4701 int i, j;
4702
4703 METHOD_TRACE("ips_init_copperhead", 1);
4704
4705 for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
4706 for (j = 0; j < 45; j++) {
4707 Isr = inb(ha->io_addr + IPS_REG_HISR);
4708 if (Isr & IPS_BIT_GHI)
4709 break;
4710
4711
4712 MDELAY(IPS_ONE_SEC);
4713 }
4714
4715 if (j >= 45)
4716
4717 return (0);
4718
4719 PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR);
4720 outb(Isr, ha->io_addr + IPS_REG_HISR);
4721 }
4722
4723 if (PostByte[0] < IPS_GOOD_POST_STATUS) {
4724 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4725 "reset controller fails (post status %x %x).\n",
4726 PostByte[0], PostByte[1]);
4727
4728 return (0);
4729 }
4730
4731 for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
4732 for (j = 0; j < 240; j++) {
4733 Isr = inb(ha->io_addr + IPS_REG_HISR);
4734 if (Isr & IPS_BIT_GHI)
4735 break;
4736
4737
4738 MDELAY(IPS_ONE_SEC);
4739 }
4740
4741 if (j >= 240)
4742
4743 return (0);
4744
4745 ConfigByte[i] = inb(ha->io_addr + IPS_REG_ISPR);
4746 outb(Isr, ha->io_addr + IPS_REG_HISR);
4747 }
4748
4749 for (i = 0; i < 240; i++) {
4750 Cbsp = inb(ha->io_addr + IPS_REG_CBSP);
4751
4752 if ((Cbsp & IPS_BIT_OP) == 0)
4753 break;
4754
4755
4756 MDELAY(IPS_ONE_SEC);
4757 }
4758
4759 if (i >= 240)
4760
4761 return (0);
4762
4763
4764 outl(0x1010, ha->io_addr + IPS_REG_CCCR);
4765
4766
4767 outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR);
4768
4769 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
4770
4771 outl(0, ha->io_addr + IPS_REG_NDAE);
4772
4773
4774 outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR);
4775
4776 return (1);
4777 }
4778
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788 static int
4789 ips_init_copperhead_memio(ips_ha_t * ha)
4790 {
4791 uint8_t Isr = 0;
4792 uint8_t Cbsp;
4793 uint8_t PostByte[IPS_MAX_POST_BYTES];
4794 uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES];
4795 int i, j;
4796
4797 METHOD_TRACE("ips_init_copperhead_memio", 1);
4798
4799 for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
4800 for (j = 0; j < 45; j++) {
4801 Isr = readb(ha->mem_ptr + IPS_REG_HISR);
4802 if (Isr & IPS_BIT_GHI)
4803 break;
4804
4805
4806 MDELAY(IPS_ONE_SEC);
4807 }
4808
4809 if (j >= 45)
4810
4811 return (0);
4812
4813 PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR);
4814 writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
4815 }
4816
4817 if (PostByte[0] < IPS_GOOD_POST_STATUS) {
4818 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4819 "reset controller fails (post status %x %x).\n",
4820 PostByte[0], PostByte[1]);
4821
4822 return (0);
4823 }
4824
4825 for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
4826 for (j = 0; j < 240; j++) {
4827 Isr = readb(ha->mem_ptr + IPS_REG_HISR);
4828 if (Isr & IPS_BIT_GHI)
4829 break;
4830
4831
4832 MDELAY(IPS_ONE_SEC);
4833 }
4834
4835 if (j >= 240)
4836
4837 return (0);
4838
4839 ConfigByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR);
4840 writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
4841 }
4842
4843 for (i = 0; i < 240; i++) {
4844 Cbsp = readb(ha->mem_ptr + IPS_REG_CBSP);
4845
4846 if ((Cbsp & IPS_BIT_OP) == 0)
4847 break;
4848
4849
4850 MDELAY(IPS_ONE_SEC);
4851 }
4852
4853 if (i >= 240)
4854
4855 return (0);
4856
4857
4858 writel(0x1010, ha->mem_ptr + IPS_REG_CCCR);
4859
4860
4861 writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR);
4862
4863 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
4864
4865 writel(0, ha->mem_ptr + IPS_REG_NDAE);
4866
4867
4868 writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
4869
4870
4871 return (1);
4872 }
4873
4874
4875
4876
4877
4878
4879
4880
4881
4882
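/*
 * ips_init_morpheus - post-reset initialization for the i960-based
 * controllers.  POST and config values are exchanged through the I960
 * message registers; a POST value of 0x4F00 indicates the battery PIC is
 * being reflashed and gets a much longer wait, and a POST of 0xEF10 with
 * certain config values flags adapters that need ips_clear_adapter() at
 * init time (ha->requires_esl).
 */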
4883 static int
4884 ips_init_morpheus(ips_ha_t * ha)
4885 {
4886 uint32_t Post;
4887 uint32_t Config;
4888 uint32_t Isr;
4889 uint32_t Oimr;
4890 int i;
4891
4892 METHOD_TRACE("ips_init_morpheus", 1);
4893
4894
4895 for (i = 0; i < 45; i++) {
4896 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4897
4898 if (Isr & IPS_BIT_I960_MSG0I)
4899 break;
4900
4901
4902 MDELAY(IPS_ONE_SEC);
4903 }
4904
4905 if (i >= 45) {
4906
4907 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4908 "timeout waiting for post.\n");
4909
4910 return (0);
4911 }
4912
4913 Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4914
4915 if (Post == 0x4F00) {
4916 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4917 "Flashing Battery PIC, Please wait ...\n");
4918
4919
4920 Isr = (uint32_t) IPS_BIT_I960_MSG0I;
4921 writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
4922
4923 for (i = 0; i < 120; i++) {
4924 Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
4925 if (Post != 0x4F00)
4926 break;
4927
4928 MDELAY(IPS_ONE_SEC);
4929 }
4930
4931 if (i >= 120) {
4932 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4933 "timeout waiting for Battery PIC Flash\n");
4934 return (0);
4935 }
4936
4937 }
4938
4939
4940 Isr = (uint32_t) IPS_BIT_I960_MSG0I;
4941 writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
4942
4943 if (Post < (IPS_GOOD_POST_STATUS << 8)) {
4944 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4945 "reset controller fails (post status %x).\n", Post);
4946
4947 return (0);
4948 }
4949
4950
4951 for (i = 0; i < 240; i++) {
4952 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
4953
4954 if (Isr & IPS_BIT_I960_MSG1I)
4955 break;
4956
4957
4958 MDELAY(IPS_ONE_SEC);
4959 }
4960
4961 if (i >= 240) {
4962
4963 IPS_PRINTK(KERN_WARNING, ha->pcidev,
4964 "timeout waiting for config.\n");
4965
4966 return (0);
4967 }
4968
4969 Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
4970
4971
4972 Isr = (uint32_t) IPS_BIT_I960_MSG1I;
4973 writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
4974
4975
4976 Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
4977 Oimr &= ~0x8;
4978 writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
4979
4980
4981
4982
4983 if (Post == 0xEF10) {
4984 if ((Config == 0x000F) || (Config == 0x0009))
4985 ha->requires_esl = 1;
4986 }
4987
4988 return (1);
4989 }
4990
4991
4992
4993
4994
4995
4996
4997
4998
4999
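/*
 * ips_reset_* - hard-reset the adapter (the RST bit in SCPR on
 * copperhead, a write of 0x80000000 to IPS_REG_I960_IDR on morpheus),
 * wait, then re-run the chip init routine, retrying once before giving
 * up.  The dummy PCI config read in the morpheus variant appears to be
 * there only to flush the reset write.
 */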
5000 static int
5001 ips_reset_copperhead(ips_ha_t * ha)
5002 {
5003 int reset_counter;
5004
5005 METHOD_TRACE("ips_reset_copperhead", 1);
5006
5007 DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d",
5008 ips_name, ha->host_num, ha->io_addr, ha->pcidev->irq);
5009
5010 reset_counter = 0;
5011
5012 while (reset_counter < 2) {
5013 reset_counter++;
5014
5015 outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
5016
5017
5018 MDELAY(IPS_ONE_SEC);
5019
5020 outb(0, ha->io_addr + IPS_REG_SCPR);
5021
5022
5023 MDELAY(IPS_ONE_SEC);
5024
5025 if ((*ha->func.init) (ha))
5026 break;
5027 else if (reset_counter >= 2) {
5028
5029 return (0);
5030 }
5031 }
5032
5033 return (1);
5034 }
5035
5036
5037
5038
5039
5040
5041
5042
5043
5044
5045 static int
5046 ips_reset_copperhead_memio(ips_ha_t * ha)
5047 {
5048 int reset_counter;
5049
5050 METHOD_TRACE("ips_reset_copperhead_memio", 1);
5051
5052 DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d",
5053 ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
5054
5055 reset_counter = 0;
5056
5057 while (reset_counter < 2) {
5058 reset_counter++;
5059
5060 writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
5061
5062
5063 MDELAY(IPS_ONE_SEC);
5064
5065 writeb(0, ha->mem_ptr + IPS_REG_SCPR);
5066
5067
5068 MDELAY(IPS_ONE_SEC);
5069
5070 if ((*ha->func.init) (ha))
5071 break;
5072 else if (reset_counter >= 2) {
5073
5074 return (0);
5075 }
5076 }
5077
5078 return (1);
5079 }
5080
5081
5082
5083
5084
5085
5086
5087
5088
5089
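/*
 * ips_reset_morpheus - reset a morpheus-class controller by writing the
 * reset bit to the i960 IDR register, waiting five seconds, and issuing a
 * dummy PCI config read (apparently just to let the adapter settle) before
 * re-running the init routine, retrying once.
 */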
5090 static int
5091 ips_reset_morpheus(ips_ha_t * ha)
5092 {
5093 int reset_counter;
5094 uint8_t junk;
5095
5096 METHOD_TRACE("ips_reset_morpheus", 1);
5097
5098 DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d",
5099 ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
5100
5101 reset_counter = 0;
5102
5103 while (reset_counter < 2) {
5104 reset_counter++;
5105
5106 writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
5107
5108
5109 MDELAY(5 * IPS_ONE_SEC);
5110
5111
5112 pci_read_config_byte(ha->pcidev, 4, &junk);
5113
5114 if ((*ha->func.init) (ha))
5115 break;
5116 else if (reset_counter >= 2) {
5117
5118 return (0);
5119 }
5120 }
5121
5122 return (1);
5123 }
5124
5125
5126
5127
5128
5129
5130
5131
5132
5133
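/*
 * ips_statinit - initialize the driver's copy of the status queue pointers
 * and program the adapter's status-queue start/end/head/tail registers
 * (port-I/O variant) with the DMA address of the status buffer.
 */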
5134 static void
5135 ips_statinit(ips_ha_t * ha)
5136 {
5137 uint32_t phys_status_start;
5138
5139 METHOD_TRACE("ips_statinit", 1);
5140
5141 ha->adapt->p_status_start = ha->adapt->status;
5142 ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
5143 ha->adapt->p_status_tail = ha->adapt->status;
5144
5145 phys_status_start = ha->adapt->hw_status_start;
5146 outl(phys_status_start, ha->io_addr + IPS_REG_SQSR);
5147 outl(phys_status_start + IPS_STATUS_Q_SIZE,
5148 ha->io_addr + IPS_REG_SQER);
5149 outl(phys_status_start + IPS_STATUS_SIZE,
5150 ha->io_addr + IPS_REG_SQHR);
5151 outl(phys_status_start, ha->io_addr + IPS_REG_SQTR);
5152
5153 ha->adapt->hw_status_tail = phys_status_start;
5154 }
5155
5156
5157
5158
5159
5160
5161
5162
5163
5164
5165 static void
5166 ips_statinit_memio(ips_ha_t * ha)
5167 {
5168 uint32_t phys_status_start;
5169
5170 METHOD_TRACE("ips_statinit_memio", 1);
5171
5172 ha->adapt->p_status_start = ha->adapt->status;
5173 ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
5174 ha->adapt->p_status_tail = ha->adapt->status;
5175
5176 phys_status_start = ha->adapt->hw_status_start;
5177 writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR);
5178 writel(phys_status_start + IPS_STATUS_Q_SIZE,
5179 ha->mem_ptr + IPS_REG_SQER);
5180 writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR);
5181 writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR);
5182
5183 ha->adapt->hw_status_tail = phys_status_start;
5184 }
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194
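/*
 * ips_statupd_copperhead - advance the status-queue tail (wrapping at the
 * end of the queue), tell the adapter the new tail via SQTR, and return
 * the status value now at the tail (port-I/O variant).
 */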
5195 static uint32_t
5196 ips_statupd_copperhead(ips_ha_t * ha)
5197 {
5198 METHOD_TRACE("ips_statupd_copperhead", 1);
5199
5200 if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
5201 ha->adapt->p_status_tail++;
5202 ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
5203 } else {
5204 ha->adapt->p_status_tail = ha->adapt->p_status_start;
5205 ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
5206 }
5207
5208 outl(ha->adapt->hw_status_tail,
5209 ha->io_addr + IPS_REG_SQTR);
5210
5211 return (ha->adapt->p_status_tail->value);
5212 }
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223 static uint32_t
5224 ips_statupd_copperhead_memio(ips_ha_t * ha)
5225 {
5226 METHOD_TRACE("ips_statupd_copperhead_memio", 1);
5227
5228 if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
5229 ha->adapt->p_status_tail++;
5230 ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
5231 } else {
5232 ha->adapt->p_status_tail = ha->adapt->p_status_start;
5233 ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
5234 }
5235
5236 writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR);
5237
5238 return (ha->adapt->p_status_tail->value);
5239 }
5240
5241
5242
5243
5244
5245
5246
5247
5248
5249
5250 static uint32_t
5251 ips_statupd_morpheus(ips_ha_t * ha)
5252 {
5253 uint32_t val;
5254
5255 METHOD_TRACE("ips_statupd_morpheus", 1);
5256
5257 val = readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ);
5258
5259 return (val);
5260 }
5261
5262
5263
5264
5265
5266
5267
5268
5269
5270
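/*
 * ips_issue_copperhead - send a command to a copperhead controller via
 * port I/O: wait for the command-channel semaphore bit in CCCR to clear,
 * failing the command if it is still busy after IPS_SEM_TIMEOUT
 * milliseconds (unless the start/stop bit is clear), then write the SCB
 * bus address to CCSAR and the start-command bit to CCCR.
 */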
5271 static int
5272 ips_issue_copperhead(ips_ha_t * ha, ips_scb_t * scb)
5273 {
5274 uint32_t TimeOut;
5275 uint32_t val;
5276
5277 METHOD_TRACE("ips_issue_copperhead", 1);
5278
5279 if (scb->scsi_cmd) {
5280 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5281 ips_name,
5282 ha->host_num,
5283 scb->cdb[0],
5284 scb->cmd.basic_io.command_id,
5285 scb->bus, scb->target_id, scb->lun);
5286 } else {
5287 DEBUG_VAR(2, KERN_NOTICE "(%s%d) ips_issue: logical cmd id %d",
5288 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5289 }
5290
5291 TimeOut = 0;
5292
5293 while ((val =
5294 le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & IPS_BIT_SEM) {
5295 udelay(1000);
5296
5297 if (++TimeOut >= IPS_SEM_TIMEOUT) {
5298 if (!(val & IPS_BIT_START_STOP))
5299 break;
5300
5301 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5302 "ips_issue val [0x%x].\n", val);
5303 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5304 "ips_issue semaphore chk timeout.\n");
5305
5306 return (IPS_FAILURE);
5307 }
5308 }
5309
5310 outl(scb->scb_busaddr, ha->io_addr + IPS_REG_CCSAR);
5311 outw(IPS_BIT_START_CMD, ha->io_addr + IPS_REG_CCCR);
5312
5313 return (IPS_SUCCESS);
5314 }
5315
5316
5317
5318
5319
5320
5321
5322
5323
5324
5325 static int
5326 ips_issue_copperhead_memio(ips_ha_t * ha, ips_scb_t * scb)
5327 {
5328 uint32_t TimeOut;
5329 uint32_t val;
5330
5331 METHOD_TRACE("ips_issue_copperhead_memio", 1);
5332
5333 if (scb->scsi_cmd) {
5334 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5335 ips_name,
5336 ha->host_num,
5337 scb->cdb[0],
5338 scb->cmd.basic_io.command_id,
5339 scb->bus, scb->target_id, scb->lun);
5340 } else {
5341 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5342 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5343 }
5344
5345 TimeOut = 0;
5346
5347 while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) {
5348 udelay(1000);
5349
5350 if (++TimeOut >= IPS_SEM_TIMEOUT) {
5351 if (!(val & IPS_BIT_START_STOP))
5352 break;
5353
5354 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5355 "ips_issue val [0x%x].\n", val);
5356 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5357 "ips_issue semaphore chk timeout.\n");
5358
5359 return (IPS_FAILURE);
5360 }
5361 }
5362
5363 writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR);
5364 writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR);
5365
5366 return (IPS_SUCCESS);
5367 }
5368
5369
5370
5371
5372
5373
5374
5375
5376
5377
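/*
 * ips_issue_i2o - send a command to an i2o-style controller by writing the
 * SCB bus address to the inbound message queue register (port-I/O variant).
 */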
5378 static int
5379 ips_issue_i2o(ips_ha_t * ha, ips_scb_t * scb)
5380 {
5381
5382 METHOD_TRACE("ips_issue_i2o", 1);
5383
5384 if (scb->scsi_cmd) {
5385 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5386 ips_name,
5387 ha->host_num,
5388 scb->cdb[0],
5389 scb->cmd.basic_io.command_id,
5390 scb->bus, scb->target_id, scb->lun);
5391 } else {
5392 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5393 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5394 }
5395
5396 outl(scb->scb_busaddr, ha->io_addr + IPS_REG_I2O_INMSGQ);
5397
5398 return (IPS_SUCCESS);
5399 }
5400
5401
5402
5403
5404
5405
5406
5407
5408
5409
5410 static int
5411 ips_issue_i2o_memio(ips_ha_t * ha, ips_scb_t * scb)
5412 {
5413
5414 METHOD_TRACE("ips_issue_i2o_memio", 1);
5415
5416 if (scb->scsi_cmd) {
5417 DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
5418 ips_name,
5419 ha->host_num,
5420 scb->cdb[0],
5421 scb->cmd.basic_io.command_id,
5422 scb->bus, scb->target_id, scb->lun);
5423 } else {
5424 DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
5425 ips_name, ha->host_num, scb->cmd.basic_io.command_id);
5426 }
5427
5428 writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ);
5429
5430 return (IPS_SUCCESS);
5431 }
5432
5433
5434
5435
5436
5437
5438
5439
5440
5441
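/*
 * ips_isintr_copperhead - test whether a copperhead controller is
 * interrupting: a HISR value of 0xFF means no adapter is responding, the
 * SCE bit means a status is waiting (return 1), and any SQO/GHI bits are
 * simply acknowledged by writing them back.
 */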
5442 static int
5443 ips_isintr_copperhead(ips_ha_t * ha)
5444 {
5445 uint8_t Isr;
5446
5447 METHOD_TRACE("ips_isintr_copperhead", 2);
5448
5449 Isr = inb(ha->io_addr + IPS_REG_HISR);
5450
5451 if (Isr == 0xFF)
5452
5453 return (0);
5454
5455 if (Isr & IPS_BIT_SCE)
5456 return (1);
5457 else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) {
5458
5459
5460 outb(Isr, ha->io_addr + IPS_REG_HISR);
5461 }
5462
5463 return (0);
5464 }
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474
5475 static int
5476 ips_isintr_copperhead_memio(ips_ha_t * ha)
5477 {
5478 uint8_t Isr;
5479
5480 METHOD_TRACE("ips_isintr_memio", 2);
5481
5482 Isr = readb(ha->mem_ptr + IPS_REG_HISR);
5483
5484 if (Isr == 0xFF)
5485
5486 return (0);
5487
5488 if (Isr & IPS_BIT_SCE)
5489 return (1);
5490 else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) {
5491
5492
5493 writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
5494 }
5495
5496 return (0);
5497 }
5498
5499
5500
5501
5502
5503
5504
5505
5506
5507
5508 static int
5509 ips_isintr_morpheus(ips_ha_t * ha)
5510 {
5511 uint32_t Isr;
5512
5513 METHOD_TRACE("ips_isintr_morpheus", 2);
5514
5515 Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
5516
5517 if (Isr & IPS_BIT_I2O_OPQI)
5518 return (1);
5519 else
5520 return (0);
5521 }
5522
5523
5524
5525
5526
5527
5528
5529
5530
5531
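/*
 * ips_wait - poll for up to 'time' seconds for the adapter to complete the
 * outstanding command (ha->waitflag cleared).  In IPS_INTR_IORL mode the
 * adapter's interrupt routine is called directly on each 1 ms iteration to
 * service completions while the caller holds the host lock.
 */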
5532 static int
5533 ips_wait(ips_ha_t * ha, int time, int intr)
5534 {
5535 int ret;
5536 int done;
5537
5538 METHOD_TRACE("ips_wait", 1);
5539
5540 ret = IPS_FAILURE;
5541 done = FALSE;
5542
5543 time *= IPS_ONE_SEC;
5544
5545 while ((time > 0) && (!done)) {
5546 if (intr == IPS_INTR_ON) {
5547 if (ha->waitflag == FALSE) {
5548 ret = IPS_SUCCESS;
5549 done = TRUE;
5550 break;
5551 }
5552 } else if (intr == IPS_INTR_IORL) {
5553 if (ha->waitflag == FALSE) {
5554
5555				/*
5556				 * The adapter completed the command and the
5557				 * interrupt service path already cleared waitflag.
5558				 */
5559 ret = IPS_SUCCESS;
5560 done = TRUE;
5561 break;
5562 }
5563
5564
5565			/*
5566			 * Not done yet.  The caller holds the host lock with
5567			 * interrupts blocked for this adapter, so poll for the
5568			 * completion by calling the interrupt routine directly.
5569			 */
5570 (*ha->func.intr) (ha);
5571 }
5572
5573
5574 udelay(1000);
5575 time--;
5576 }
5577
5578 return (ret);
5579 }
5580
5581
5582
5583
5584
5585
5586
5587
5588
5589
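/*
 * ips_write_driver_status - read NVRAM page 5, repair an invalid signature,
 * record the BIOS version, stamp the OS, adapter type and driver/BIOS
 * version fields, and write the page back.  Also caches the adapter slot
 * number from the page.
 */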
5590 static int
5591 ips_write_driver_status(ips_ha_t * ha, int intr)
5592 {
5593 METHOD_TRACE("ips_write_driver_status", 1);
5594
5595 if (!ips_readwrite_page5(ha, FALSE, intr)) {
5596 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5597 "unable to read NVRAM page 5.\n");
5598
5599 return (0);
5600 }
5601
5602
5603
5604 if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) {
5605 DEBUG_VAR(1,
5606 "(%s%d) NVRAM page 5 has an invalid signature: %X.",
5607 ips_name, ha->host_num, ha->nvram->signature);
5608 ha->nvram->signature = IPS_NVRAM_P5_SIG;
5609 }
5610
5611 DEBUG_VAR(2,
5612 "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.",
5613 ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type),
5614 ha->nvram->adapter_slot, ha->nvram->bios_high[0],
5615 ha->nvram->bios_high[1], ha->nvram->bios_high[2],
5616 ha->nvram->bios_high[3], ha->nvram->bios_low[0],
5617 ha->nvram->bios_low[1], ha->nvram->bios_low[2],
5618 ha->nvram->bios_low[3]);
5619
5620 ips_get_bios_version(ha, intr);
5621
5622
5623 ha->nvram->operating_system = IPS_OS_LINUX;
5624 ha->nvram->adapter_type = ha->ad_type;
5625 strncpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4);
5626 strncpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4);
5627 strncpy((char *) ha->nvram->bios_high, ha->bios_version, 4);
5628 strncpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4);
5629
5630 ha->nvram->versioning = 0;
5631
5632
5633 if (!ips_readwrite_page5(ha, TRUE, intr)) {
5634 IPS_PRINTK(KERN_WARNING, ha->pcidev,
5635 "unable to write NVRAM page 5.\n");
5636
5637 return (0);
5638 }
5639
5640
5641 ha->slot_num = ha->nvram->adapter_slot;
5642
5643 return (1);
5644 }
5645
5646
5647
5648
5649
5650
5651
5652
5653
5654
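/*
 * ips_read_adapter_status - issue an ENQUIRY command, DMA'd straight into
 * ha->enq, and wait for it to complete.  Returns 1 on success, 0 on failure.
 */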
5655 static int
5656 ips_read_adapter_status(ips_ha_t * ha, int intr)
5657 {
5658 ips_scb_t *scb;
5659 int ret;
5660
5661 METHOD_TRACE("ips_read_adapter_status", 1);
5662
5663 scb = &ha->scbs[ha->max_cmds - 1];
5664
5665 ips_init_scb(ha, scb);
5666
5667 scb->timeout = ips_cmd_timeout;
5668 scb->cdb[0] = IPS_CMD_ENQUIRY;
5669
5670 scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
5671 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
5672 scb->cmd.basic_io.sg_count = 0;
5673 scb->cmd.basic_io.lba = 0;
5674 scb->cmd.basic_io.sector_count = 0;
5675 scb->cmd.basic_io.log_drv = 0;
5676 scb->data_len = sizeof (*ha->enq);
5677 scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
5678
5679
5680 if (((ret =
5681 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5682 || (ret == IPS_SUCCESS_IMM)
5683 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5684 return (0);
5685
5686 return (1);
5687 }
5688
5689
5690
5691
5692
5693
5694
5695
5696
5697
5698 static int
5699 ips_read_subsystem_parameters(ips_ha_t * ha, int intr)
5700 {
5701 ips_scb_t *scb;
5702 int ret;
5703
5704 METHOD_TRACE("ips_read_subsystem_parameters", 1);
5705
5706 scb = &ha->scbs[ha->max_cmds - 1];
5707
5708 ips_init_scb(ha, scb);
5709
5710 scb->timeout = ips_cmd_timeout;
5711 scb->cdb[0] = IPS_CMD_GET_SUBSYS;
5712
5713 scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS;
5714 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
5715 scb->cmd.basic_io.sg_count = 0;
5716 scb->cmd.basic_io.lba = 0;
5717 scb->cmd.basic_io.sector_count = 0;
5718 scb->cmd.basic_io.log_drv = 0;
5719 scb->data_len = sizeof (*ha->subsys);
5720 scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;
5721
5722
5723 if (((ret =
5724 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5725 || (ret == IPS_SUCCESS_IMM)
5726 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5727 return (0);
5728
5729 memcpy(ha->subsys, ha->ioctl_data, sizeof(*ha->subsys));
5730 return (1);
5731 }
5732
5733
5734
5735
5736
5737
5738
5739
5740
5741
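/*
 * ips_read_config - read the adapter configuration into the ioctl DMA
 * buffer and copy it to ha->conf.  On failure the config is zeroed and the
 * initiator IDs default to 7; a "completed with error" status is still
 * treated as success.
 */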
5742 static int
5743 ips_read_config(ips_ha_t * ha, int intr)
5744 {
5745 ips_scb_t *scb;
5746 int i;
5747 int ret;
5748
5749 METHOD_TRACE("ips_read_config", 1);
5750
5751
5752 for (i = 0; i < 4; i++)
5753 ha->conf->init_id[i] = 7;
5754
5755 scb = &ha->scbs[ha->max_cmds - 1];
5756
5757 ips_init_scb(ha, scb);
5758
5759 scb->timeout = ips_cmd_timeout;
5760 scb->cdb[0] = IPS_CMD_READ_CONF;
5761
5762 scb->cmd.basic_io.op_code = IPS_CMD_READ_CONF;
5763 scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
5764 scb->data_len = sizeof (*ha->conf);
5765 scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;
5766
5767
5768 if (((ret =
5769 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5770 || (ret == IPS_SUCCESS_IMM)
5771 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
5772
5773 memset(ha->conf, 0, sizeof (IPS_CONF));
5774
5775
5776 for (i = 0; i < 4; i++)
5777 ha->conf->init_id[i] = 7;
5778
5779
5780 if ((scb->basic_status & IPS_GSC_STATUS_MASK) ==
5781 IPS_CMD_CMPLT_WERROR)
5782 return (1);
5783
5784 return (0);
5785 }
5786
5787 memcpy(ha->conf, ha->ioctl_data, sizeof(*ha->conf));
5788 return (1);
5789 }
5790
5791
5792
5793
5794
5795
5796
5797
5798
5799
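/*
 * ips_readwrite_page5 - read or write NVRAM page 5 through the ioctl DMA
 * buffer, copying to/from ha->nvram.  The in-memory NVRAM copy is zeroed
 * if the command fails.
 */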
5800 static int
5801 ips_readwrite_page5(ips_ha_t * ha, int write, int intr)
5802 {
5803 ips_scb_t *scb;
5804 int ret;
5805
5806 METHOD_TRACE("ips_readwrite_page5", 1);
5807
5808 scb = &ha->scbs[ha->max_cmds - 1];
5809
5810 ips_init_scb(ha, scb);
5811
5812 scb->timeout = ips_cmd_timeout;
5813 scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE;
5814
5815 scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE;
5816 scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb);
5817 scb->cmd.nvram.page = 5;
5818 scb->cmd.nvram.write = write;
5819 scb->cmd.nvram.reserved = 0;
5820 scb->cmd.nvram.reserved2 = 0;
5821 scb->data_len = sizeof (*ha->nvram);
5822 scb->cmd.nvram.buffer_addr = ha->ioctl_busaddr;
5823 if (write)
5824 memcpy(ha->ioctl_data, ha->nvram, sizeof(*ha->nvram));
5825
5826
5827 if (((ret =
5828 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5829 || (ret == IPS_SUCCESS_IMM)
5830 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
5831
5832 memset(ha->nvram, 0, sizeof (IPS_NVRAM_P5));
5833
5834 return (0);
5835 }
5836 if (!write)
5837 memcpy(ha->nvram, ha->ioctl_data, sizeof(*ha->nvram));
5838 return (1);
5839 }
5840
5841
5842
5843
5844
5845
5846
5847
5848
5849
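/*
 * ips_clear_adapter - clear the adapter by sending a CONFIG_SYNC command
 * followed by an ERROR_TABLE (unlock stripe) command, waiting for each to
 * complete.  Returns 1 on success, 0 on failure.
 */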
5850 static int
5851 ips_clear_adapter(ips_ha_t * ha, int intr)
5852 {
5853 ips_scb_t *scb;
5854 int ret;
5855
5856 METHOD_TRACE("ips_clear_adapter", 1);
5857
5858 scb = &ha->scbs[ha->max_cmds - 1];
5859
5860 ips_init_scb(ha, scb);
5861
5862 scb->timeout = ips_reset_timeout;
5863 scb->cdb[0] = IPS_CMD_CONFIG_SYNC;
5864
5865 scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC;
5866 scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb);
5867 scb->cmd.config_sync.channel = 0;
5868 scb->cmd.config_sync.source_target = IPS_POCL;
5869 scb->cmd.config_sync.reserved = 0;
5870 scb->cmd.config_sync.reserved2 = 0;
5871 scb->cmd.config_sync.reserved3 = 0;
5872
5873
5874 if (((ret =
5875 ips_send_wait(ha, scb, ips_reset_timeout, intr)) == IPS_FAILURE)
5876 || (ret == IPS_SUCCESS_IMM)
5877 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5878 return (0);
5879
5880
5881 ips_init_scb(ha, scb);
5882
5883 scb->cdb[0] = IPS_CMD_ERROR_TABLE;
5884 scb->timeout = ips_reset_timeout;
5885
5886 scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE;
5887 scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb);
5888 scb->cmd.unlock_stripe.log_drv = 0;
5889 scb->cmd.unlock_stripe.control = IPS_CSL;
5890 scb->cmd.unlock_stripe.reserved = 0;
5891 scb->cmd.unlock_stripe.reserved2 = 0;
5892 scb->cmd.unlock_stripe.reserved3 = 0;
5893
5894
5895 if (((ret =
5896 ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
5897 || (ret == IPS_SUCCESS_IMM)
5898 || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
5899 return (0);
5900
5901 return (1);
5902 }
5903
5904
5905
5906
5907
5908
5909
5910
5911
5912
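/*
 * ips_ffdc_reset - report a reset to the adapter for FFDC purposes
 * (first-failure data capture, in IBM terminology): sends an FFDC command
 * carrying the reset count, a reset type of 0x80 and the current time.
 */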
5913 static void
5914 ips_ffdc_reset(ips_ha_t * ha, int intr)
5915 {
5916 ips_scb_t *scb;
5917
5918 METHOD_TRACE("ips_ffdc_reset", 1);
5919
5920 scb = &ha->scbs[ha->max_cmds - 1];
5921
5922 ips_init_scb(ha, scb);
5923
5924 scb->timeout = ips_cmd_timeout;
5925 scb->cdb[0] = IPS_CMD_FFDC;
5926 scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
5927 scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
5928 scb->cmd.ffdc.reset_count = ha->reset_count;
5929 scb->cmd.ffdc.reset_type = 0x80;
5930
5931
5932 ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
5933
5934
5935 ips_send_wait(ha, scb, ips_cmd_timeout, intr);
5936 }
5937
5938
5939
5940
5941
5942
5943
5944
5945
5946
5947 static void
5948 ips_ffdc_time(ips_ha_t * ha)
5949 {
5950 ips_scb_t *scb;
5951
5952 METHOD_TRACE("ips_ffdc_time", 1);
5953
5954 DEBUG_VAR(1, "(%s%d) Sending time update.", ips_name, ha->host_num);
5955
5956 scb = &ha->scbs[ha->max_cmds - 1];
5957
5958 ips_init_scb(ha, scb);
5959
5960 scb->timeout = ips_cmd_timeout;
5961 scb->cdb[0] = IPS_CMD_FFDC;
5962 scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
5963 scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
5964 scb->cmd.ffdc.reset_count = 0;
5965 scb->cmd.ffdc.reset_type = 0;
5966
5967
5968 ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
5969
5970
5971 ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC);
5972 }
5973
5974
5975
5976
5977
5978
5979
5980
5981
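/*
 * ips_fix_ffdc_time - break a time64_t down into the hour/minute/second,
 * century, two-digit year, month and day fields of the FFDC command.
 */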
5982 static void
5983 ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time64_t current_time)
5984 {
5985 struct tm tm;
5986
5987 METHOD_TRACE("ips_fix_ffdc_time", 1);
5988
5989 time64_to_tm(current_time, 0, &tm);
5990
5991 scb->cmd.ffdc.hour = tm.tm_hour;
5992 scb->cmd.ffdc.minute = tm.tm_min;
5993 scb->cmd.ffdc.second = tm.tm_sec;
5994 scb->cmd.ffdc.yearH = (tm.tm_year + 1900) / 100;
5995 scb->cmd.ffdc.yearL = tm.tm_year % 100;
5996 scb->cmd.ffdc.month = tm.tm_mon + 1;
5997 scb->cmd.ffdc.day = tm.tm_mday;
5998 }
5999
6000
6001
6002
6003
6004
6005
6006
6007
6008
6009
6010
6011
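/*
 * ips_erase_bios - erase the flash BIOS through port I/O.  The 0x50, 0x20,
 * 0xD0 and 0x70 writes to the flash data port look like an Intel-style
 * clear-status / erase-setup / erase-confirm / read-status sequence; the
 * routine then polls the status for up to 80 seconds, attempts an erase
 * suspend (0xB0) on timeout, and finally returns the part to read-array
 * mode (0xFF).  Returns 0 on success, 1 on error.
 */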
6012 static int
6013 ips_erase_bios(ips_ha_t * ha)
6014 {
6015 int timeout;
6016 uint8_t status = 0;
6017
6018 METHOD_TRACE("ips_erase_bios", 1);
6019
6020 status = 0;
6021
6022
6023 outl(0, ha->io_addr + IPS_REG_FLAP);
6024 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6025 udelay(25);
6026
6027 outb(0x50, ha->io_addr + IPS_REG_FLDP);
6028 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6029 udelay(25);
6030
6031
6032 outb(0x20, ha->io_addr + IPS_REG_FLDP);
6033 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6034 udelay(25);
6035
6036
6037 outb(0xD0, ha->io_addr + IPS_REG_FLDP);
6038 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6039 udelay(25);
6040
6041
6042 outb(0x70, ha->io_addr + IPS_REG_FLDP);
6043 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6044 udelay(25);
6045
6046 timeout = 80000;
6047
6048 while (timeout > 0) {
6049 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6050 outl(0, ha->io_addr + IPS_REG_FLAP);
6051 udelay(25);
6052 }
6053
6054 status = inb(ha->io_addr + IPS_REG_FLDP);
6055
6056 if (status & 0x80)
6057 break;
6058
6059 MDELAY(1);
6060 timeout--;
6061 }
6062
6063
6064 if (timeout <= 0) {
6065
6066
6067
6068 outb(0xB0, ha->io_addr + IPS_REG_FLDP);
6069 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6070 udelay(25);
6071
6072
6073 timeout = 10000;
6074 while (timeout > 0) {
6075 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6076 outl(0, ha->io_addr + IPS_REG_FLAP);
6077 udelay(25);
6078 }
6079
6080 status = inb(ha->io_addr + IPS_REG_FLDP);
6081
6082 if (status & 0xC0)
6083 break;
6084
6085 MDELAY(1);
6086 timeout--;
6087 }
6088
6089 return (1);
6090 }
6091
6092
6093 if (status & 0x08)
6094
6095 return (1);
6096
6097
6098 if (status & 0x30)
6099
6100 return (1);
6101
6102
6103
6104 outb(0x50, ha->io_addr + IPS_REG_FLDP);
6105 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6106 udelay(25);
6107
6108
6109 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6110 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6111 udelay(25);
6112
6113 return (0);
6114 }
6115
6116
6117
6118
6119
6120
6121
6122
6123
6124 static int
6125 ips_erase_bios_memio(ips_ha_t * ha)
6126 {
6127 int timeout;
6128 uint8_t status;
6129
6130 METHOD_TRACE("ips_erase_bios_memio", 1);
6131
6132 status = 0;
6133
6134
6135 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6136 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6137 udelay(25);
6138
6139 writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
6140 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6141 udelay(25);
6142
6143
6144 writeb(0x20, ha->mem_ptr + IPS_REG_FLDP);
6145 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6146 udelay(25);
6147
6148
6149 writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP);
6150 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6151 udelay(25);
6152
6153
6154 writeb(0x70, ha->mem_ptr + IPS_REG_FLDP);
6155 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6156 udelay(25);
6157
6158 timeout = 80000;
6159
6160 while (timeout > 0) {
6161 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6162 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6163 udelay(25);
6164 }
6165
6166 status = readb(ha->mem_ptr + IPS_REG_FLDP);
6167
6168 if (status & 0x80)
6169 break;
6170
6171 MDELAY(1);
6172 timeout--;
6173 }
6174
6175
6176 if (timeout <= 0) {
6177
6178
6179
6180 writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP);
6181 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6182 udelay(25);
6183
6184
6185 timeout = 10000;
6186 while (timeout > 0) {
6187 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6188 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6189 udelay(25);
6190 }
6191
6192 status = readb(ha->mem_ptr + IPS_REG_FLDP);
6193
6194 if (status & 0xC0)
6195 break;
6196
6197 MDELAY(1);
6198 timeout--;
6199 }
6200
6201 return (1);
6202 }
6203
6204
6205 if (status & 0x08)
6206
6207 return (1);
6208
6209
6210 if (status & 0x30)
6211
6212 return (1);
6213
6214
6215
6216 writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
6217 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6218 udelay(25);
6219
6220
6221 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6222 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6223 udelay(25);
6224
6225 return (0);
6226 }
6227
6228
6229
6230
6231
6232
6233
6234
6235
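/*
 * ips_program_bios - program the flash through port I/O one byte at a
 * time: set the flash address, issue what appears to be a program-setup
 * command (0x40) followed by the data byte, then poll the status register,
 * bailing out if the operation times out or the error bits (0x18) are set.
 * The part is returned to read-array mode (0xFF) before returning.
 */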
6236 static int
6237 ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6238 uint32_t offset)
6239 {
6240 int i;
6241 int timeout;
6242 uint8_t status = 0;
6243
6244 METHOD_TRACE("ips_program_bios", 1);
6245
6246 status = 0;
6247
6248 for (i = 0; i < buffersize; i++) {
6249
6250 outl(i + offset, ha->io_addr + IPS_REG_FLAP);
6251 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6252 udelay(25);
6253
6254 outb(0x40, ha->io_addr + IPS_REG_FLDP);
6255 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6256 udelay(25);
6257
6258 outb(buffer[i], ha->io_addr + IPS_REG_FLDP);
6259 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6260 udelay(25);
6261
6262
6263 timeout = 1000;
6264 while (timeout > 0) {
6265 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6266 outl(0, ha->io_addr + IPS_REG_FLAP);
6267 udelay(25);
6268 }
6269
6270 status = inb(ha->io_addr + IPS_REG_FLDP);
6271
6272 if (status & 0x80)
6273 break;
6274
6275 MDELAY(1);
6276 timeout--;
6277 }
6278
6279 if (timeout == 0) {
6280
6281 outl(0, ha->io_addr + IPS_REG_FLAP);
6282 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6283 udelay(25);
6284
6285 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6286 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6287 udelay(25);
6288
6289 return (1);
6290 }
6291
6292
6293 if (status & 0x18) {
6294
6295 outl(0, ha->io_addr + IPS_REG_FLAP);
6296 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6297 udelay(25);
6298
6299 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6300 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6301 udelay(25);
6302
6303 return (1);
6304 }
6305 }
6306
6307
6308 outl(0, ha->io_addr + IPS_REG_FLAP);
6309 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6310 udelay(25);
6311
6312 outb(0xFF, ha->io_addr + IPS_REG_FLDP);
6313 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6314 udelay(25);
6315
6316 return (0);
6317 }
6318
6319
6320
6321
6322
6323
6324
6325
6326
6327 static int
6328 ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6329 uint32_t offset)
6330 {
6331 int i;
6332 int timeout;
6333 uint8_t status = 0;
6334
6335 METHOD_TRACE("ips_program_bios_memio", 1);
6336
6337 status = 0;
6338
6339 for (i = 0; i < buffersize; i++) {
6340
6341 writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
6342 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6343 udelay(25);
6344
6345 writeb(0x40, ha->mem_ptr + IPS_REG_FLDP);
6346 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6347 udelay(25);
6348
6349 writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP);
6350 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6351 udelay(25);
6352
6353
6354 timeout = 1000;
6355 while (timeout > 0) {
6356 if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
6357 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6358 udelay(25);
6359 }
6360
6361 status = readb(ha->mem_ptr + IPS_REG_FLDP);
6362
6363 if (status & 0x80)
6364 break;
6365
6366 MDELAY(1);
6367 timeout--;
6368 }
6369
6370 if (timeout == 0) {
6371
6372 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6373 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6374 udelay(25);
6375
6376 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6377 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6378 udelay(25);
6379
6380 return (1);
6381 }
6382
6383
6384 if (status & 0x18) {
6385
6386 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6387 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6388 udelay(25);
6389
6390 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6391 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6392 udelay(25);
6393
6394 return (1);
6395 }
6396 }
6397
6398
6399 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6400 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6401 udelay(25);
6402
6403 writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
6404 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6405 udelay(25);
6406
6407 return (0);
6408 }
6409
6410
6411
6412
6413
6414
6415
6416
6417
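/*
 * ips_verify_bios - verify the flashed image through port I/O: check for
 * the 0x55 0xAA signature in the first two bytes, then sum the remaining
 * bytes starting from 0xFF and require the checksum to come out zero.
 * Returns 0 if the image verifies, 1 otherwise.
 */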
6418 static int
6419 ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6420 uint32_t offset)
6421 {
6422 uint8_t checksum;
6423 int i;
6424
6425 METHOD_TRACE("ips_verify_bios", 1);
6426
6427
6428 outl(0, ha->io_addr + IPS_REG_FLAP);
6429 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6430 udelay(25);
6431
6432 if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
6433 return (1);
6434
6435 outl(1, ha->io_addr + IPS_REG_FLAP);
6436 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6437 udelay(25);
6438 if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
6439 return (1);
6440
6441 checksum = 0xff;
6442 for (i = 2; i < buffersize; i++) {
6443
6444 outl(i + offset, ha->io_addr + IPS_REG_FLAP);
6445 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6446 udelay(25);
6447
6448 checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP);
6449 }
6450
6451 if (checksum != 0)
6452
6453 return (1);
6454 else
6455
6456 return (0);
6457 }
6458
6459
6460
6461
6462
6463
6464
6465
6466
6467 static int
6468 ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
6469 uint32_t offset)
6470 {
6471 uint8_t checksum;
6472 int i;
6473
6474 METHOD_TRACE("ips_verify_bios_memio", 1);
6475
6476
6477 writel(0, ha->mem_ptr + IPS_REG_FLAP);
6478 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6479 udelay(25);
6480
6481 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
6482 return (1);
6483
6484 writel(1, ha->mem_ptr + IPS_REG_FLAP);
6485 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6486 udelay(25);
6487 if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
6488 return (1);
6489
6490 checksum = 0xff;
6491 for (i = 2; i < buffersize; i++) {
6492
6493 writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
6494 if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
6495 udelay(25);
6496
6497 checksum =
6498 (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP);
6499 }
6500
6501 if (checksum != 0)
6502
6503 return (1);
6504 else
6505
6506 return (0);
6507 }
6508
6509
6510
6511
6512
6513
6514
6515
6516 static int
6517 ips_abort_init(ips_ha_t * ha, int index)
6518 {
6519 ha->active = 0;
6520 ips_free(ha);
6521 ips_ha[index] = NULL;
6522 ips_sh[index] = NULL;
6523 return -1;
6524 }
6525
6526
6527
6528
6529
6530
6531
6532
6533 static void
6534 ips_shift_controllers(int lowindex, int highindex)
6535 {
6536 ips_ha_t *ha_sav = ips_ha[highindex];
6537 struct Scsi_Host *sh_sav = ips_sh[highindex];
6538 int i;
6539
6540 for (i = highindex; i > lowindex; i--) {
6541 ips_ha[i] = ips_ha[i - 1];
6542 ips_sh[i] = ips_sh[i - 1];
6543 ips_ha[i]->host_num = i;
6544 }
6545 ha_sav->host_num = lowindex;
6546 ips_ha[lowindex] = ha_sav;
6547 ips_sh[lowindex] = sh_sav;
6548 }
6549
6550
6551
6552
6553
6554
6555
6556
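/*
 * ips_order_controllers - reorder the detected controllers to match the
 * adapter-order bytes ('M', 'N', 'S', 'A') stored in the first controller's
 * NVRAM page 5.  If no order is stored, the integrated 5i adapters are
 * moved to the front, followed by the ServeRAID 4L/4M/4LX/4MX adapters.
 */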
6557 static void
6558 ips_order_controllers(void)
6559 {
6560 int i, j, tmp, position = 0;
6561 IPS_NVRAM_P5 *nvram;
6562 if (!ips_ha[0])
6563 return;
6564 nvram = ips_ha[0]->nvram;
6565
6566 if (nvram->adapter_order[0]) {
6567 for (i = 1; i <= nvram->adapter_order[0]; i++) {
6568 for (j = position; j < ips_num_controllers; j++) {
6569 switch (ips_ha[j]->ad_type) {
6570 case IPS_ADTYPE_SERVERAID6M:
6571 case IPS_ADTYPE_SERVERAID7M:
6572 if (nvram->adapter_order[i] == 'M') {
6573 ips_shift_controllers(position,
6574 j);
6575 position++;
6576 }
6577 break;
6578 case IPS_ADTYPE_SERVERAID4L:
6579 case IPS_ADTYPE_SERVERAID4M:
6580 case IPS_ADTYPE_SERVERAID4MX:
6581 case IPS_ADTYPE_SERVERAID4LX:
6582 if (nvram->adapter_order[i] == 'N') {
6583 ips_shift_controllers(position,
6584 j);
6585 position++;
6586 }
6587 break;
6588 case IPS_ADTYPE_SERVERAID6I:
6589 case IPS_ADTYPE_SERVERAID5I2:
6590 case IPS_ADTYPE_SERVERAID5I1:
6591 case IPS_ADTYPE_SERVERAID7k:
6592 if (nvram->adapter_order[i] == 'S') {
6593 ips_shift_controllers(position,
6594 j);
6595 position++;
6596 }
6597 break;
6598 case IPS_ADTYPE_SERVERAID:
6599 case IPS_ADTYPE_SERVERAID2:
6600 case IPS_ADTYPE_NAVAJO:
6601 case IPS_ADTYPE_KIOWA:
6602 case IPS_ADTYPE_SERVERAID3L:
6603 case IPS_ADTYPE_SERVERAID3:
6604 case IPS_ADTYPE_SERVERAID4H:
6605 if (nvram->adapter_order[i] == 'A') {
6606 ips_shift_controllers(position,
6607 j);
6608 position++;
6609 }
6610 break;
6611 default:
6612 break;
6613 }
6614 }
6615 }
6616
6617 return;
6618 }
6619
6620 tmp = 0;
6621 for (i = position; i < ips_num_controllers; i++) {
6622 if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I2 ||
6623 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I1) {
6624 ips_shift_controllers(position, i);
6625 position++;
6626 tmp = 1;
6627 }
6628 }
6629
6630 if (!tmp)
6631 return;
6632 for (i = position; i < ips_num_controllers; i++) {
6633 if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4L ||
6634 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4M ||
6635 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4LX ||
6636 ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4MX) {
6637 ips_shift_controllers(position, i);
6638 position++;
6639 }
6640 }
6641
6642 return;
6643 }
6644
6645
6646
6647
6648
6649
6650
6651
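/*
 * ips_register_scsi - allocate a Scsi_Host for the controller, copy the
 * temporary ha structure into its hostdata, move the IRQ registration over
 * to the new structure, fill in the host limits from the template and the
 * adapter, then add and scan the host.
 */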
6652 static int
6653 ips_register_scsi(int index)
6654 {
6655 struct Scsi_Host *sh;
6656 ips_ha_t *ha, *oldha = ips_ha[index];
6657 sh = scsi_host_alloc(&ips_driver_template, sizeof (ips_ha_t));
6658 if (!sh) {
6659 IPS_PRINTK(KERN_WARNING, oldha->pcidev,
6660 "Unable to register controller with SCSI subsystem\n");
6661 return -1;
6662 }
6663 ha = IPS_HA(sh);
6664 memcpy(ha, oldha, sizeof (ips_ha_t));
6665 free_irq(oldha->pcidev->irq, oldha);
6666
6667 if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
6668 IPS_PRINTK(KERN_WARNING, ha->pcidev,
6669 "Unable to install interrupt handler\n");
6670 goto err_out_sh;
6671 }
6672
6673 kfree(oldha);
6674
6675
6676 sh->unique_id = (ha->io_addr) ? ha->io_addr : ha->mem_addr;
6677 sh->sg_tablesize = sh->hostt->sg_tablesize;
6678 sh->can_queue = sh->hostt->can_queue;
6679 sh->cmd_per_lun = sh->hostt->cmd_per_lun;
6680 sh->max_sectors = 128;
6681
6682 sh->max_id = ha->ntargets;
6683 sh->max_lun = ha->nlun;
6684 sh->max_channel = ha->nbus - 1;
6685 sh->can_queue = ha->max_cmds - 1;
6686
6687 if (scsi_add_host(sh, &ha->pcidev->dev))
6688 goto err_out;
6689
6690 ips_sh[index] = sh;
6691 ips_ha[index] = ha;
6692
6693 scsi_scan_host(sh);
6694
6695 return 0;
6696
6697 err_out:
6698 free_irq(ha->pcidev->irq, ha);
6699 err_out_sh:
6700 scsi_host_put(sh);
6701 return -1;
6702 }
6703
6704
6705
6706
6707
6708
6709
6710 static void
6711 ips_remove_device(struct pci_dev *pci_dev)
6712 {
6713 struct Scsi_Host *sh = pci_get_drvdata(pci_dev);
6714
6715 pci_set_drvdata(pci_dev, NULL);
6716
6717 ips_release(sh);
6718
6719 pci_release_regions(pci_dev);
6720 pci_disable_device(pci_dev);
6721 }
6722
6723
6724
6725
6726
6727
6728
6729
6730 static int __init
6731 ips_module_init(void)
6732 {
6733 #if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
6734 printk(KERN_ERR "ips: This driver has only been tested on the x86/ia64/x86_64 platforms\n");
6735 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
6736 #endif
6737
6738 if (pci_register_driver(&ips_pci_driver) < 0)
6739 return -ENODEV;
6740 ips_driver_template.module = THIS_MODULE;
6741 ips_order_controllers();
6742 if (!ips_detect(&ips_driver_template)) {
6743 pci_unregister_driver(&ips_pci_driver);
6744 return -ENODEV;
6745 }
6746 register_reboot_notifier(&ips_notifier);
6747 return 0;
6748 }
6749
6750
6751
6752
6753
6754
6755
6756
6757 static void __exit
6758 ips_module_exit(void)
6759 {
6760 pci_unregister_driver(&ips_pci_driver);
6761 unregister_reboot_notifier(&ips_notifier);
6762 }
6763
6764 module_init(ips_module_init);
6765 module_exit(ips_module_exit);
6766
6767
6768
6769
6770
6771
6772
6773
6774
6775
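/*
 * ips_insert_device - PCI probe entry point: enable the device, claim its
 * regions, run the two-phase controller initialization and, in the hotplug
 * path, register the controller with the SCSI midlayer.
 */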
6776 static int
6777 ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
6778 {
6779 int index = -1;
6780 int rc;
6781
6782 METHOD_TRACE("ips_insert_device", 1);
6783 rc = pci_enable_device(pci_dev);
6784 if (rc)
6785 return rc;
6786
6787 rc = pci_request_regions(pci_dev, "ips");
6788 if (rc)
6789 goto err_out;
6790
6791 rc = ips_init_phase1(pci_dev, &index);
6792 if (rc == SUCCESS)
6793 rc = ips_init_phase2(index);
6794
6795 if (ips_hotplug)
6796 if (ips_register_scsi(index)) {
6797 ips_free(ips_ha[index]);
6798 rc = -1;
6799 }
6800
6801 if (rc == SUCCESS)
6802 ips_num_controllers++;
6803
6804 ips_next_controller = ips_num_controllers;
6805
6806 if (rc < 0) {
6807 rc = -ENODEV;
6808 goto err_out_regions;
6809 }
6810
6811 pci_set_drvdata(pci_dev, ips_sh[index]);
6812 return 0;
6813
6814 err_out_regions:
6815 pci_release_regions(pci_dev);
6816 err_out:
6817 pci_disable_device(pci_dev);
6818 return rc;
6819 }
6820
6821
6822
6823
6824
6825
6826
6827
6828
6829
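/*
 * ips_init_phase1 - first-stage controller bring-up: find a free adapter
 * slot, map the I/O and/or memory BARs, allocate the ha structure and its
 * DMA buffers (enquiry, adapter status, logical drive info, ioctl buffer),
 * pick a 64-bit or 32-bit DMA mask, set up the function table, and make
 * sure the controller is initialized (resetting a morpheus-class adapter
 * that reports 0xDEADBEEF in MSG1).
 */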
6830 static int
6831 ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
6832 {
6833 ips_ha_t *ha;
6834 uint32_t io_addr;
6835 uint32_t mem_addr;
6836 uint32_t io_len;
6837 uint32_t mem_len;
6838 uint8_t bus;
6839 uint8_t func;
6840 int j;
6841 int index;
6842 dma_addr_t dma_address;
6843 char __iomem *ioremap_ptr;
6844 char __iomem *mem_ptr;
6845 uint32_t IsDead;
6846
6847 METHOD_TRACE("ips_init_phase1", 1);
6848 index = IPS_MAX_ADAPTERS;
6849 for (j = 0; j < IPS_MAX_ADAPTERS; j++) {
6850 if (ips_ha[j] == NULL) {
6851 index = j;
6852 break;
6853 }
6854 }
6855
6856 if (index >= IPS_MAX_ADAPTERS)
6857 return -1;
6858
6859
6860 bus = pci_dev->bus->number;
6861 func = pci_dev->devfn;
6862
6863
6864 mem_addr = 0;
6865 io_addr = 0;
6866 mem_len = 0;
6867 io_len = 0;
6868
6869 for (j = 0; j < 2; j++) {
6870 if (!pci_resource_start(pci_dev, j))
6871 break;
6872
6873 if (pci_resource_flags(pci_dev, j) & IORESOURCE_IO) {
6874 io_addr = pci_resource_start(pci_dev, j);
6875 io_len = pci_resource_len(pci_dev, j);
6876 } else {
6877 mem_addr = pci_resource_start(pci_dev, j);
6878 mem_len = pci_resource_len(pci_dev, j);
6879 }
6880 }
6881
6882
6883 if (mem_addr) {
6884 uint32_t base;
6885 uint32_t offs;
6886
6887 base = mem_addr & PAGE_MASK;
6888 offs = mem_addr - base;
6889 ioremap_ptr = ioremap(base, PAGE_SIZE);
6890 if (!ioremap_ptr)
6891 return -1;
6892 mem_ptr = ioremap_ptr + offs;
6893 } else {
6894 ioremap_ptr = NULL;
6895 mem_ptr = NULL;
6896 }
6897
6898
6899 ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL);
6900 if (ha == NULL) {
6901 IPS_PRINTK(KERN_WARNING, pci_dev,
6902 "Unable to allocate temporary ha struct\n");
6903 return -1;
6904 }
6905
6906 ips_sh[index] = NULL;
6907 ips_ha[index] = ha;
6908 ha->active = 1;
6909
6910
6911 ha->io_addr = io_addr;
6912 ha->io_len = io_len;
6913 ha->mem_addr = mem_addr;
6914 ha->mem_len = mem_len;
6915 ha->mem_ptr = mem_ptr;
6916 ha->ioremap_ptr = ioremap_ptr;
6917 ha->host_num = (uint32_t) index;
6918 ha->slot_num = PCI_SLOT(pci_dev->devfn);
6919 ha->pcidev = pci_dev;
6920
6921
6922
6923
6924
6925
6926
6927 if (sizeof(dma_addr_t) > 4 && IPS_HAS_ENH_SGLIST(ha) &&
6928 !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
6929 (ha)->flags |= IPS_HA_ENH_SG;
6930 } else {
6931 if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) {
6932 printk(KERN_WARNING "Unable to set DMA Mask\n");
6933 return ips_abort_init(ha, index);
6934 }
6935 }
6936 if(ips_cd_boot && !ips_FlashData){
6937 ips_FlashData = dma_alloc_coherent(&pci_dev->dev,
6938 PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL);
6939 }
6940
6941 ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ),
6942 &ha->enq_busaddr, GFP_KERNEL);
6943 if (!ha->enq) {
6944 IPS_PRINTK(KERN_WARNING, pci_dev,
6945 "Unable to allocate host inquiry structure\n");
6946 return ips_abort_init(ha, index);
6947 }
6948
6949 ha->adapt = dma_alloc_coherent(&pci_dev->dev,
6950 sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD),
6951 &dma_address, GFP_KERNEL);
6952 if (!ha->adapt) {
6953 IPS_PRINTK(KERN_WARNING, pci_dev,
6954 "Unable to allocate host adapt & dummy structures\n");
6955 return ips_abort_init(ha, index);
6956 }
6957 ha->adapt->hw_status_start = dma_address;
6958 ha->dummy = (void *) (ha->adapt + 1);
6959
6960
6961
6962 ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev,
6963 sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL);
6964 if (!ha->logical_drive_info) {
6965 IPS_PRINTK(KERN_WARNING, pci_dev,
6966 "Unable to allocate logical drive info structure\n");
6967 return ips_abort_init(ha, index);
6968 }
6969 ha->logical_drive_info_dma_addr = dma_address;
6970
6971
6972 ha->conf = kmalloc(sizeof (IPS_CONF), GFP_KERNEL);
6973
6974 if (!ha->conf) {
6975 IPS_PRINTK(KERN_WARNING, pci_dev,
6976 "Unable to allocate host conf structure\n");
6977 return ips_abort_init(ha, index);
6978 }
6979
6980 ha->nvram = kmalloc(sizeof (IPS_NVRAM_P5), GFP_KERNEL);
6981
6982 if (!ha->nvram) {
6983 IPS_PRINTK(KERN_WARNING, pci_dev,
6984 "Unable to allocate host NVRAM structure\n");
6985 return ips_abort_init(ha, index);
6986 }
6987
6988 ha->subsys = kmalloc(sizeof (IPS_SUBSYS), GFP_KERNEL);
6989
6990 if (!ha->subsys) {
6991 IPS_PRINTK(KERN_WARNING, pci_dev,
6992 "Unable to allocate host subsystem structure\n");
6993 return ips_abort_init(ha, index);
6994 }
6995
6996
6997
6998 if (ips_ioctlsize < PAGE_SIZE)
6999 ips_ioctlsize = PAGE_SIZE;
7000
7001 ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize,
7002 &ha->ioctl_busaddr, GFP_KERNEL);
7003 ha->ioctl_len = ips_ioctlsize;
7004 if (!ha->ioctl_data) {
7005 IPS_PRINTK(KERN_WARNING, pci_dev,
7006 "Unable to allocate IOCTL data\n");
7007 return ips_abort_init(ha, index);
7008 }
7009
7010
7011
7012
7013 ips_setup_funclist(ha);
7014
7015 if ((IPS_IS_MORPHEUS(ha)) || (IPS_IS_MARCO(ha))) {
7016
7017 IsDead = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
7018 if (IsDead == 0xDEADBEEF) {
7019 ips_reset_morpheus(ha);
7020 }
7021 }
7022
7023
7024	/*
7025	 * Initialize the controller only if the firmware has not already done so.
7026	 */
7027 if (!(*ha->func.isinit) (ha)) {
7028 if (!(*ha->func.init) (ha)) {
7029
7030
7031
7032 IPS_PRINTK(KERN_WARNING, pci_dev,
7033 "Unable to initialize controller\n");
7034 return ips_abort_init(ha, index);
7035 }
7036 }
7037
7038 *indexPtr = index;
7039 return SUCCESS;
7040 }
7041
7042
7043
7044
7045
7046
7047
7048
7049
7050
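/*
 * ips_init_phase2 - second-stage bring-up: install the interrupt handler,
 * allocate a single CCB so ips_hainit() can talk to the adapter, then free
 * it and allocate the full set of CCBs.
 */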
7051 static int
7052 ips_init_phase2(int index)
7053 {
7054 ips_ha_t *ha;
7055
7056 ha = ips_ha[index];
7057
7058 METHOD_TRACE("ips_init_phase2", 1);
7059 if (!ha->active) {
7060 ips_ha[index] = NULL;
7061 return -1;
7062 }
7063
7064
7065 if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
7066 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7067 "Unable to install interrupt handler\n");
7068 return ips_abort_init(ha, index);
7069 }
7070
7071
7072
7073
7074 ha->max_cmds = 1;
7075 if (!ips_allocatescbs(ha)) {
7076 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7077 "Unable to allocate a CCB\n");
7078 free_irq(ha->pcidev->irq, ha);
7079 return ips_abort_init(ha, index);
7080 }
7081
7082 if (!ips_hainit(ha)) {
7083 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7084 "Unable to initialize controller\n");
7085 free_irq(ha->pcidev->irq, ha);
7086 return ips_abort_init(ha, index);
7087 }
7088
7089 ips_deallocatescbs(ha, 1);
7090
7091
7092 if (!ips_allocatescbs(ha)) {
7093 IPS_PRINTK(KERN_WARNING, ha->pcidev,
7094 "Unable to allocate CCBs\n");
7095 free_irq(ha->pcidev->irq, ha);
7096 return ips_abort_init(ha, index);
7097 }
7098
7099 return SUCCESS;
7100 }
7101
7102 MODULE_LICENSE("GPL");
7103 MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
7104 MODULE_VERSION(IPS_VER_STRING);