This source file includes the following definitions:
- set_safe_settings
- fix_settings
- eeprom_index_to_delay
- delay_to_eeprom_index
- eeprom_override
- list_size
- dcb_get_next
- free_tag
- find_cmd
- waiting_set_timer
- waiting_process_next
- waiting_timeout
- find_dcb
- send_srb
- build_srb
- dc395x_queue_command_lck
- DEF_SCSI_QCMD
- dc395x_bios_param
- dump_register_info
- clear_fifo
- reset_dev_param
- __dc395x_eh_bus_reset
- dc395x_eh_bus_reset
- dc395x_eh_abort
- build_sdtr
- build_wdtr
- selto_timer
- selection_timeout_missed
- start_scsi
- enable_msgout_abort
- dc395x_handle_interrupt
- dc395x_interrupt
- msgout_phase0
- msgout_phase1
- command_phase0
- command_phase1
- sg_verify_length
- sg_update_list
- sg_subtract_one
- cleanup_after_transfer
- data_out_phase0
- data_out_phase1
- data_in_phase0
- data_in_phase1
- data_io_transfer
- status_phase0
- status_phase1
- msgin_completed
- msgin_reject
- msgin_qtag
- reprogram_regs
- msgin_set_async
- msgin_set_sync
- msgin_set_nowide
- msgin_set_wide
- msgin_phase0
- msgin_phase1
- nop0
- nop1
- set_xfer_rate
- disconnect
- reselect
- tagq_blacklist
- disc_tagq_set
- add_dev
- pci_unmap_srb
- pci_unmap_srb_sense
- srb_done
- doing_srb_done
- reset_scsi_bus
- set_basic_config
- scsi_reset_detect
- request_sense
- device_alloc
- adapter_add_device
- adapter_remove_device
- adapter_remove_and_free_device
- adapter_remove_and_free_all_devices
- dc395x_slave_alloc
- dc395x_slave_destroy
- trms1040_wait_30us
- trms1040_write_cmd
- trms1040_set_data
- trms1040_write_all
- trms1040_get_data
- trms1040_read_all
- check_eeprom
- print_eeprom_settings
- adapter_sg_tables_free
- adapter_sg_tables_alloc
- adapter_print_config
- adapter_init_params
- adapter_init_scsi_host
- adapter_init_chip
- adapter_init
- adapter_uninit_chip
- adapter_uninit
- dc395x_show_info
- banner_display
- dc395x_init_one
- dc395x_remove_one
- dc395x_module_init
- dc395x_module_exit
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/delay.h>
52 #include <linux/ctype.h>
53 #include <linux/blkdev.h>
54 #include <linux/interrupt.h>
55 #include <linux/init.h>
56 #include <linux/spinlock.h>
57 #include <linux/pci.h>
58 #include <linux/list.h>
59 #include <linux/vmalloc.h>
60 #include <linux/slab.h>
61 #include <asm/io.h>
62
63 #include <scsi/scsi.h>
64 #include <scsi/scsicam.h>
65 #include <scsi/scsi_cmnd.h>
66 #include <scsi/scsi_device.h>
67 #include <scsi/scsi_host.h>
68
69 #include "dc395x.h"
70
71 #define DC395X_NAME "dc395x"
72 #define DC395X_BANNER "Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
73 #define DC395X_VERSION "v2.05, 2004/03/08"
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92 #define DBG_KG 0x0001
93 #define DBG_0 0x0002
94 #define DBG_1 0x0004
95 #define DBG_SG 0x0020
96 #define DBG_FIFO 0x0040
97 #define DBG_PIO 0x0080
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112 #define dprintkl(level, format, arg...) \
113 printk(level DC395X_NAME ": " format , ## arg)
114
115
116 #ifdef DEBUG_MASK
117
118
119
120
121
122
123 #define dprintkdbg(type, format, arg...) \
124 do { \
125 if ((type) & (DEBUG_MASK)) \
126 dprintkl(KERN_DEBUG , format , ## arg); \
127 } while (0)
128
129
130
131
132 #define debug_enabled(type) ((DEBUG_MASK) & (type))
133
134 #else
135
136
137
138 #define dprintkdbg(type, format, arg...) \
139 do {} while (0)
140 #define debug_enabled(type) (0)
141
142 #endif
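/*
 * Debug output is compiled in only when DEBUG_MASK is defined before this
 * block (typically from the build flags).  An illustrative setting, not part
 * of the original build, would be:
 *
 *	#define DEBUG_MASK (DBG_0 | DBG_KG | DBG_SG)
 *
 * which makes dprintkdbg() emit KERN_DEBUG messages for those bits and makes
 * debug_enabled(type) evaluate to non-zero for them; without DEBUG_MASK both
 * compile away completely.
 */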
143
144
145 #ifndef PCI_VENDOR_ID_TEKRAM
146 #define PCI_VENDOR_ID_TEKRAM 0x1DE1
147 #endif
148 #ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
149 #define PCI_DEVICE_ID_TEKRAM_TRMS1040 0x0391
150 #endif
151
152
153 #define DC395x_LOCK_IO(dev,flags) spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
154 #define DC395x_UNLOCK_IO(dev,flags) spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)
155
156 #define DC395x_read8(acb,address) (u8)(inb(acb->io_port_base + (address)))
157 #define DC395x_read16(acb,address) (u16)(inw(acb->io_port_base + (address)))
158 #define DC395x_read32(acb,address) (u32)(inl(acb->io_port_base + (address)))
159 #define DC395x_write8(acb,address,value) outb((value), acb->io_port_base + (address))
160 #define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
161 #define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
162
163
164 #define RES_TARGET 0x000000FF
165 #define RES_TARGET_LNX STATUS_MASK
166 #define RES_ENDMSG 0x0000FF00
167 #define RES_DID 0x00FF0000
168 #define RES_DRV 0xFF000000
169
170 #define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
171 #define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
172
173 #define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
174 #define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
175 #define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
176 #define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
177 #define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
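/*
 * The SCSI result word packs four byte-wide fields: driver byte, DID (host)
 * byte, end-of-transfer message byte and target status byte.  A sketch of how
 * the macros compose (illustrative only, not lifted from the driver body):
 *
 *	cmd->result = MK_RES(0, DID_OK, 0, SAM_STAT_GOOD);
 *	SET_RES_DID(cmd->result, DID_ERROR);	 replace just the host byte
 */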
178
179 #define TAG_NONE 255
180
181
182
183
184
185
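/*
 * Per-SRB scatter/gather descriptor table: each SGentry holds a 32-bit bus
 * address and length consumed by the TRM-S1040 DMA engine.  SEGMENTX_LEN is
 * the size of one complete table (DC395x_MAX_SG_LISTENTRY entries), which is
 * what build_srb() maps for DMA via dma_map_single().
 */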
186 #define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
187
188
189 struct SGentry {
190 u32 address;
191 u32 length;
192 };
193
194
195 struct NVRamTarget {
196 u8 cfg0;
197 u8 period;
198 u8 cfg2;
199 u8 cfg3;
200 };
201
202 struct NvRamType {
203 u8 sub_vendor_id[2];
204 u8 sub_sys_id[2];
205 u8 sub_class;
206 u8 vendor_id[2];
207 u8 device_id[2];
208 u8 reserved;
209 struct NVRamTarget target[DC395x_MAX_SCSI_ID];
210
211
212
213
214
215
216 u8 scsi_id;
217 u8 channel_cfg;
218 u8 delay_time;
219 u8 max_tag;
220 u8 reserved0;
221 u8 boot_target;
222 u8 boot_lun;
223 u8 reserved1;
224 u16 reserved2[22];
225 u16 cksum;
226 };
227
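/*
 * One ScsiReqBlk (SRB) tracks a single SCSI command from build_srb() until
 * srb_done(): its scatter/gather state, message-in/message-out buffers,
 * tag number, status bytes and current bus phase.
 */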
228 struct ScsiReqBlk {
229 struct list_head list;
230 struct DeviceCtlBlk *dcb;
231 struct scsi_cmnd *cmd;
232
233 struct SGentry *segment_x;
234 dma_addr_t sg_bus_addr;
235
236 u8 sg_count;
237 u8 sg_index;
238 size_t total_xfer_length;
239 size_t request_length;
240
241
242
243
244
245
246
247
248
249 size_t xferred;
250
251 u16 state;
252
253 u8 msgin_buf[6];
254 u8 msgout_buf[6];
255
256 u8 adapter_status;
257 u8 target_status;
258 u8 msg_count;
259 u8 end_message;
260
261 u8 tag_number;
262 u8 status;
263 u8 retry_count;
264 u8 flag;
265
266 u8 scsi_phase;
267 };
268
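/*
 * One DeviceCtlBlk (DCB) exists per target/LUN and carries that device's
 * waiting and going SRB queues, its tag bookkeeping (tag_mask, max_command)
 * and the negotiated sync/wide transfer parameters.
 */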
269 struct DeviceCtlBlk {
270 struct list_head list;
271 struct AdapterCtlBlk *acb;
272 struct list_head srb_going_list;
273 struct list_head srb_waiting_list;
274
275 struct ScsiReqBlk *active_srb;
276 u32 tag_mask;
277
278 u16 max_command;
279
280 u8 target_id;
281 u8 target_lun;
282 u8 identify_msg;
283 u8 dev_mode;
284
285 u8 inquiry7;
286 u8 sync_mode;
287 u8 min_nego_period;
288 u8 sync_period;
289
290 u8 sync_offset;
291 u8 flag;
292 u8 dev_type;
293 u8 init_tcq_flag;
294 };
295
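/*
 * One AdapterCtlBlk (ACB) exists per TRM-S1040 host adapter: the I/O port
 * range, the DCB list and round-robin pointer, the free SRB pool, the
 * selection/waiting timers and a copy of the EEPROM configuration.
 */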
296 struct AdapterCtlBlk {
297 struct Scsi_Host *scsi_host;
298
299 unsigned long io_port_base;
300 unsigned long io_port_len;
301
302 struct list_head dcb_list;
303 struct DeviceCtlBlk *dcb_run_robin;
304 struct DeviceCtlBlk *active_dcb;
305
306 struct list_head srb_free_list;
307 struct ScsiReqBlk *tmp_srb;
308 struct timer_list waiting_timer;
309 struct timer_list selto_timer;
310
311 unsigned long last_reset;
312
313 u16 srb_count;
314
315 u8 sel_timeout;
316
317 unsigned int irq_level;
318 u8 tag_max_num;
319 u8 acb_flag;
320 u8 gmode2;
321
322 u8 config;
323 u8 lun_chk;
324 u8 scan_devices;
325 u8 hostid_bit;
326
327 u8 dcb_map[DC395x_MAX_SCSI_ID];
328 struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];
329
330 struct pci_dev *dev;
331
332 u8 msg_len;
333
334 struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];
335 struct ScsiReqBlk srb;
336
337 struct NvRamType eeprom;
338 };
339
340
341
342
343
344 static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
345 u16 *pscsi_status);
346 static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
347 u16 *pscsi_status);
348 static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
349 u16 *pscsi_status);
350 static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
351 u16 *pscsi_status);
352 static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
353 u16 *pscsi_status);
354 static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
355 u16 *pscsi_status);
356 static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
357 u16 *pscsi_status);
358 static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
359 u16 *pscsi_status);
360 static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
361 u16 *pscsi_status);
362 static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
363 u16 *pscsi_status);
364 static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
365 u16 *pscsi_status);
366 static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
367 u16 *pscsi_status);
368 static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
369 u16 *pscsi_status);
370 static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
371 u16 *pscsi_status);
372 static void set_basic_config(struct AdapterCtlBlk *acb);
373 static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
374 struct ScsiReqBlk *srb);
375 static void reset_scsi_bus(struct AdapterCtlBlk *acb);
376 static void data_io_transfer(struct AdapterCtlBlk *acb,
377 struct ScsiReqBlk *srb, u16 io_dir);
378 static void disconnect(struct AdapterCtlBlk *acb);
379 static void reselect(struct AdapterCtlBlk *acb);
380 static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
381 struct ScsiReqBlk *srb);
382 static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
383 struct ScsiReqBlk *srb);
384 static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
385 struct ScsiReqBlk *srb);
386 static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
387 struct scsi_cmnd *cmd, u8 force);
388 static void scsi_reset_detect(struct AdapterCtlBlk *acb);
389 static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
390 static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
391 struct ScsiReqBlk *srb);
392 static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
393 struct ScsiReqBlk *srb);
394 static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
395 struct ScsiReqBlk *srb);
396 static void set_xfer_rate(struct AdapterCtlBlk *acb,
397 struct DeviceCtlBlk *dcb);
398 static void waiting_timeout(struct timer_list *t);
399
400
401
402
403
404 static u16 current_sync_offset = 0;
405
406 static void *dc395x_scsi_phase0[] = {
407 data_out_phase0,
408 data_in_phase0,
409 command_phase0,
410 status_phase0,
411 nop0,
412 nop0,
413 msgout_phase0,
414 msgin_phase0,
415 };
416
417 static void *dc395x_scsi_phase1[] = {
418 data_out_phase1,
419 data_in_phase1,
420 command_phase1,
421 status_phase1,
422 nop1,
423 nop1,
424 msgout_phase1,
425 msgin_phase1,
426 };
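/*
 * Jump tables indexed by the SCSI bus phase bits (scsi_status & PHASEMASK).
 * dc395x_handle_interrupt() first calls the phase0 handler for the phase just
 * left, then the phase1 handler for the phase just entered; the two reserved
 * phase codes (4 and 5) map to nop0/nop1.
 */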
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450 static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
451 static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
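/*
 * Indexed by the 3-bit EEPROM speed setting: clock_period[] is the minimum
 * synchronous transfer period in 4 ns units (the value sent in SDTR
 * negotiation), clock_speed[] the matching rate in 100 kHz units, i.e.
 * 20.0, 13.3, 10.0, 8.0, 6.7, 5.8, 5.0 and 4.0 MHz.
 */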
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467 #define CFG_ADAPTER_ID 0
468 #define CFG_MAX_SPEED 1
469 #define CFG_DEV_MODE 2
470 #define CFG_ADAPTER_MODE 3
471 #define CFG_TAGS 4
472 #define CFG_RESET_DELAY 5
473
474 #define CFG_NUM 6
475
476
477
478
479
480
481 #define CFG_PARAM_UNSET -1
482
483
484
485
486
487 struct ParameterData {
488 int value;
489 int min;
490 int max;
491 int def;
492 int safe;
493 };
494 static struct ParameterData cfg_data[] = {
495 {
496 CFG_PARAM_UNSET,
497 0,
498 15,
499 7,
500 7
501 },
502 {
503 CFG_PARAM_UNSET,
504 0,
505 7,
506 1,
507 4,
508 },
509 {
510 CFG_PARAM_UNSET,
511 0,
512 0x3f,
513 NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
514 NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
515 NTC_DO_SEND_START,
516 NTC_DO_PARITY_CHK | NTC_DO_SEND_START
517 },
518 {
519 CFG_PARAM_UNSET,
520 0,
521 0x2f,
522 NAC_SCANLUN |
523 NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
524 ,
525 NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
526 },
527 {
528 CFG_PARAM_UNSET,
529 0,
530 5,
531 3,
532 2,
533 },
534 {
535 CFG_PARAM_UNSET,
536 0,
537 180,
538 1,
539 10,
540 }
541 };
542
543
544
545
546
547
548
549 static bool use_safe_settings = false;
550 module_param_named(safe, use_safe_settings, bool, 0);
551 MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");
552
553
554 module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
555 MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");
556
557 module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
558 MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");
559
560 module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
561 MODULE_PARM_DESC(dev_mode, "Device mode.");
562
563 module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
564 MODULE_PARM_DESC(adapter_mode, "Adapter mode.");
565
566 module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
567 MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");
568
569 module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
570 MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
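/*
 * Example module load using the parameter names registered above (the values
 * are illustrative only):
 *
 *	modprobe dc395x safe=1 tags=2 reset_delay=10
 *
 * Any parameter left unset keeps CFG_PARAM_UNSET and is later taken from the
 * EEPROM by eeprom_override(); out-of-range values are reset to their
 * defaults by fix_settings().
 */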
571
572
573
574
575
576
577 static void set_safe_settings(void)
578 {
579 if (use_safe_settings)
580 {
581 int i;
582
583 dprintkl(KERN_INFO, "Using safe settings.\n");
584 for (i = 0; i < CFG_NUM; i++)
585 {
586 cfg_data[i].value = cfg_data[i].safe;
587 }
588 }
589 }
590
591
592
593
594
595
596 static void fix_settings(void)
597 {
598 int i;
599
600 dprintkdbg(DBG_1,
601 "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
602 "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
603 cfg_data[CFG_ADAPTER_ID].value,
604 cfg_data[CFG_MAX_SPEED].value,
605 cfg_data[CFG_DEV_MODE].value,
606 cfg_data[CFG_ADAPTER_MODE].value,
607 cfg_data[CFG_TAGS].value,
608 cfg_data[CFG_RESET_DELAY].value);
609 for (i = 0; i < CFG_NUM; i++)
610 {
611 if (cfg_data[i].value < cfg_data[i].min
612 || cfg_data[i].value > cfg_data[i].max)
613 cfg_data[i].value = cfg_data[i].def;
614 }
615 }
616
617
618
619
620
621
622
623 static char eeprom_index_to_delay_map[] =
624 { 1, 3, 5, 10, 16, 30, 60, 120 };
625
626
627
628
629
630
631
632
633 static void eeprom_index_to_delay(struct NvRamType *eeprom)
634 {
635 eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
636 }
637
638
639
640
641
642
643
644
645
646 static int delay_to_eeprom_index(int delay)
647 {
648 u8 idx = 0;
649 while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
650 idx++;
651 return idx;
652 }
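/*
 * Worked example: delay_to_eeprom_index(10) walks the map {1,3,5,10,...} and
 * returns 3, the first index whose entry is not below the requested delay in
 * seconds; eeprom_index_to_delay() maps index 3 back to 10 seconds.
 */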
653
654
655
656
657
658
659
660
661
662 static void eeprom_override(struct NvRamType *eeprom)
663 {
664 u8 id;
665
666
667 if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
668 eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;
669
670 if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
671 eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;
672
673 if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
674 eeprom->delay_time = delay_to_eeprom_index(
675 cfg_data[CFG_RESET_DELAY].value);
676
677 if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
678 eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;
679
680
681 for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
682 if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
683 eeprom->target[id].cfg0 =
684 (u8)cfg_data[CFG_DEV_MODE].value;
685
686 if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
687 eeprom->target[id].period =
688 (u8)cfg_data[CFG_MAX_SPEED].value;
689
690 }
691 }
692
693
694
695
696
697 static unsigned int list_size(struct list_head *head)
698 {
699 unsigned int count = 0;
700 struct list_head *pos;
701 list_for_each(pos, head)
702 count++;
703 return count;
704 }
705
706
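/*
 * Return the DCB following 'pos' in the list, wrapping around to the first
 * entry; used by waiting_process_next() for round-robin device selection.
 */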
707 static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
708 struct DeviceCtlBlk *pos)
709 {
710 int use_next = 0;
711 struct DeviceCtlBlk* next = NULL;
712 struct DeviceCtlBlk* i;
713
714 if (list_empty(head))
715 return NULL;
716
717
718 list_for_each_entry(i, head, list)
719 if (use_next) {
720 next = i;
721 break;
722 } else if (i == pos) {
723 use_next = 1;
724 }
725
726 if (!next)
727 list_for_each_entry(i, head, list) {
728 next = i;
729 break;
730 }
731
732 return next;
733 }
734
735
736 static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
737 {
738 if (srb->tag_number < 255) {
739 dcb->tag_mask &= ~(1 << srb->tag_number);
740 srb->tag_number = 255;
741 }
742 }
743
744
745
746 static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
747 struct list_head *head)
748 {
749 struct ScsiReqBlk *i;
750 list_for_each_entry(i, head, list)
751 if (i->cmd == cmd)
752 return i;
753 return NULL;
754 }
755
756
757 static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
758 {
759 if (timer_pending(&acb->waiting_timer))
760 return;
761 if (time_before(jiffies + to, acb->last_reset - HZ / 2))
762 acb->waiting_timer.expires =
763 acb->last_reset - HZ / 2 + 1;
764 else
765 acb->waiting_timer.expires = jiffies + to + 1;
766 add_timer(&acb->waiting_timer);
767 }
768
769
770
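/*
 * Round-robin over the DCB list starting at dcb_run_robin: the first device
 * that has a waiting SRB and fewer than max_command commands outstanding gets
 * its next SRB started; if start_scsi() fails the SRB stays queued and the
 * waiting timer is re-armed.
 */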
771 static void waiting_process_next(struct AdapterCtlBlk *acb)
772 {
773 struct DeviceCtlBlk *start = NULL;
774 struct DeviceCtlBlk *pos;
775 struct DeviceCtlBlk *dcb;
776 struct ScsiReqBlk *srb;
777 struct list_head *dcb_list_head = &acb->dcb_list;
778
779 if (acb->active_dcb
780 || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
781 return;
782
783 if (timer_pending(&acb->waiting_timer))
784 del_timer(&acb->waiting_timer);
785
786 if (list_empty(dcb_list_head))
787 return;
788
789
790
791
792
793 list_for_each_entry(dcb, dcb_list_head, list)
794 if (dcb == acb->dcb_run_robin) {
795 start = dcb;
796 break;
797 }
798 if (!start) {
799
800 start = list_entry(dcb_list_head->next, typeof(*start), list);
801 acb->dcb_run_robin = start;
802 }
803
804
805
806
807
808
809 pos = start;
810 do {
811 struct list_head *waiting_list_head = &pos->srb_waiting_list;
812
813
814 acb->dcb_run_robin = dcb_get_next(dcb_list_head,
815 acb->dcb_run_robin);
816
817 if (list_empty(waiting_list_head) ||
818 pos->max_command <= list_size(&pos->srb_going_list)) {
819
820 pos = dcb_get_next(dcb_list_head, pos);
821 } else {
822 srb = list_entry(waiting_list_head->next,
823 struct ScsiReqBlk, list);
824
825
826 if (!start_scsi(acb, pos, srb))
827 list_move(&srb->list, &pos->srb_going_list);
828 else
829 waiting_set_timer(acb, HZ/50);
830 break;
831 }
832 } while (pos != start);
833 }
834
835
836
837 static void waiting_timeout(struct timer_list *t)
838 {
839 unsigned long flags;
840 struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer);
841 dprintkdbg(DBG_1,
842 "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
843 DC395x_LOCK_IO(acb->scsi_host, flags);
844 waiting_process_next(acb);
845 DC395x_UNLOCK_IO(acb->scsi_host, flags);
846 }
847
848
849
850 static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
851 {
852 return acb->children[id][lun];
853 }
854
855
856
857 static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
858 {
859 struct DeviceCtlBlk *dcb = srb->dcb;
860
861 if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
862 acb->active_dcb ||
863 (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
864 list_add_tail(&srb->list, &dcb->srb_waiting_list);
865 waiting_process_next(acb);
866 return;
867 }
868
869 if (!start_scsi(acb, dcb, srb)) {
870 list_add_tail(&srb->list, &dcb->srb_going_list);
871 } else {
872 list_add(&srb->list, &dcb->srb_waiting_list);
873 waiting_set_timer(acb, HZ / 50);
874 }
875 }
876
877
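/*
 * Fill in a free SRB for 'cmd': map the scatter/gather list, copy it into the
 * hardware SGentry table, trim any overshoot against the request length, pad
 * odd totals for wide transfers and map the table itself for DMA.
 */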
878 static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
879 struct ScsiReqBlk *srb)
880 {
881 int nseg;
882 enum dma_data_direction dir = cmd->sc_data_direction;
883 dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
884 cmd, dcb->target_id, dcb->target_lun);
885
886 srb->dcb = dcb;
887 srb->cmd = cmd;
888 srb->sg_count = 0;
889 srb->total_xfer_length = 0;
890 srb->sg_bus_addr = 0;
891 srb->sg_index = 0;
892 srb->adapter_status = 0;
893 srb->target_status = 0;
894 srb->msg_count = 0;
895 srb->status = 0;
896 srb->flag = 0;
897 srb->state = 0;
898 srb->retry_count = 0;
899 srb->tag_number = TAG_NONE;
900 srb->scsi_phase = PH_BUS_FREE;
901 srb->end_message = 0;
902
903 nseg = scsi_dma_map(cmd);
904 BUG_ON(nseg < 0);
905
906 if (dir == DMA_NONE || !nseg) {
907 dprintkdbg(DBG_0,
908 "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
909 scsi_bufflen(cmd), scsi_sglist(cmd), scsi_sg_count(cmd),
910 srb->segment_x[0].address);
911 } else {
912 int i;
913 u32 reqlen = scsi_bufflen(cmd);
914 struct scatterlist *sg;
915 struct SGentry *sgp = srb->segment_x;
916
917 srb->sg_count = nseg;
918
919 dprintkdbg(DBG_0,
920 "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
921 reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
922 srb->sg_count);
923
924 scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
925 u32 busaddr = (u32)sg_dma_address(sg);
926 u32 seglen = (u32)sg->length;
927 sgp[i].address = busaddr;
928 sgp[i].length = seglen;
929 srb->total_xfer_length += seglen;
930 }
931 sgp += srb->sg_count - 1;
932
933
934
935
936
937 if (srb->total_xfer_length > reqlen) {
938 sgp->length -= (srb->total_xfer_length - reqlen);
939 srb->total_xfer_length = reqlen;
940 }
941
942
943 if (dcb->sync_period & WIDE_SYNC &&
944 srb->total_xfer_length % 2) {
945 srb->total_xfer_length++;
946 sgp->length++;
947 }
948
949 srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
950 srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
951
952 dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
953 srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
954 }
955
956 srb->request_length = srb->total_xfer_length;
957 }
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979 static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
980 {
981 struct DeviceCtlBlk *dcb;
982 struct ScsiReqBlk *srb;
983 struct AdapterCtlBlk *acb =
984 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
985 dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
986 cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);
987
988
989 cmd->result = DID_BAD_TARGET << 16;
990
991
992 if (cmd->device->id >= acb->scsi_host->max_id ||
993 cmd->device->lun >= acb->scsi_host->max_lun ||
994 cmd->device->lun > 31) {
995 goto complete;
996 }
997
998
999 if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
1000 dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
1001 cmd->device->id, (u8)cmd->device->lun);
1002 goto complete;
1003 }
1004
1005
1006 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1007 if (!dcb) {
1008
1009 dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
1010 cmd->device->id, (u8)cmd->device->lun);
1011 goto complete;
1012 }
1013
1014
1015 cmd->scsi_done = done;
1016 cmd->result = 0;
1017
1018 srb = list_first_entry_or_null(&acb->srb_free_list,
1019 struct ScsiReqBlk, list);
1020 if (!srb) {
1021
1022
1023
1024
1025 dprintkdbg(DBG_0, "queue_command: No free srb's\n");
1026 return 1;
1027 }
1028 list_del(&srb->list);
1029
1030 build_srb(cmd, dcb, srb);
1031
1032 if (!list_empty(&dcb->srb_waiting_list)) {
1033
1034 list_add_tail(&srb->list, &dcb->srb_waiting_list);
1035 waiting_process_next(acb);
1036 } else {
1037
1038 send_srb(acb, srb);
1039 }
1040 dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
1041 return 0;
1042
1043 complete:
1044
1045
1046
1047
1048
1049
1050 done(cmd);
1051 return 0;
1052 }
1053
1054 static DEF_SCSI_QCMD(dc395x_queue_command)
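/*
 * DEF_SCSI_QCMD() generates dc395x_queue_command(), which takes the
 * Scsi_Host lock and then calls dc395x_queue_command_lck() above.
 */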
1055
1056
1057
1058
1059 static int dc395x_bios_param(struct scsi_device *sdev,
1060 struct block_device *bdev, sector_t capacity, int *info)
1061 {
1062 #ifdef CONFIG_SCSI_DC395x_TRMS1040_TRADMAP
1063 int heads, sectors, cylinders;
1064 struct AdapterCtlBlk *acb;
1065 int size = capacity;
1066
1067 dprintkdbg(DBG_0, "dc395x_bios_param..............\n");
1068 acb = (struct AdapterCtlBlk *)sdev->host->hostdata;
1069 heads = 64;
1070 sectors = 32;
1071 cylinders = size / (heads * sectors);
1072
1073 if ((acb->gmode2 & NAC_GREATER_1G) && (cylinders > 1024)) {
1074 heads = 255;
1075 sectors = 63;
1076 cylinders = size / (heads * sectors);
1077 }
1078 info[0] = heads;
1079 info[1] = sectors;
1080 info[2] = cylinders;
1081 return 0;
1082 #else
1083 return scsicam_bios_param(bdev, capacity, info);
1084 #endif
1085 }
1086
1087
1088 static void dump_register_info(struct AdapterCtlBlk *acb,
1089 struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
1090 {
1091 u16 pstat;
1092 struct pci_dev *dev = acb->dev;
1093 pci_read_config_word(dev, PCI_STATUS, &pstat);
1094 if (!dcb)
1095 dcb = acb->active_dcb;
1096 if (!srb && dcb)
1097 srb = dcb->active_srb;
1098 if (srb) {
1099 if (!srb->cmd)
1100 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
1101 srb, srb->cmd);
1102 else
1103 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
1104 "cmnd=0x%02x <%02i-%i>\n",
1105 srb, srb->cmd,
1106 srb->cmd->cmnd[0], srb->cmd->device->id,
1107 (u8)srb->cmd->device->lun);
1108 printk(" sglist=%p cnt=%i idx=%i len=%zu\n",
1109 srb->segment_x, srb->sg_count, srb->sg_index,
1110 srb->total_xfer_length);
1111 printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
1112 srb->state, srb->status, srb->scsi_phase,
1113 (acb->active_dcb) ? "" : "not");
1114 }
1115 dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
1116 "signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
1117 "rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
1118 "config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
1119 DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
1120 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
1121 DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
1122 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
1123 DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
1124 DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
1125 DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
1126 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
1127 DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
1128 DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
1129 DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
1130 DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
1131 DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
1132 dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
1133 "irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
1134 "ctctr=0x%08x addr=0x%08x:0x%08x}\n",
1135 DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
1136 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
1137 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
1138 DC395x_read8(acb, TRM_S1040_DMA_STATUS),
1139 DC395x_read8(acb, TRM_S1040_DMA_INTEN),
1140 DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
1141 DC395x_read32(acb, TRM_S1040_DMA_XCNT),
1142 DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
1143 DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
1144 DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
1145 dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
1146 "pci{status=0x%04x}\n",
1147 DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
1148 DC395x_read8(acb, TRM_S1040_GEN_STATUS),
1149 DC395x_read8(acb, TRM_S1040_GEN_TIMER),
1150 pstat);
1151 }
1152
1153
1154 static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
1155 {
1156 #if debug_enabled(DBG_FIFO)
1157 u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
1158 u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
1159 if (!(fifocnt & 0x40))
1160 dprintkdbg(DBG_FIFO,
1161 "clear_fifo: (%i bytes) on phase %02x in %s\n",
1162 fifocnt & 0x3f, lines, txt);
1163 #endif
1164 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
1165 }
1166
1167
1168 static void reset_dev_param(struct AdapterCtlBlk *acb)
1169 {
1170 struct DeviceCtlBlk *dcb;
1171 struct NvRamType *eeprom = &acb->eeprom;
1172 dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);
1173
1174 list_for_each_entry(dcb, &acb->dcb_list, list) {
1175 u8 period_index;
1176
1177 dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
1178 dcb->sync_period = 0;
1179 dcb->sync_offset = 0;
1180
1181 dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
1182 period_index = eeprom->target[dcb->target_id].period & 0x07;
1183 dcb->min_nego_period = clock_period[period_index];
1184 if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
1185 || !(acb->config & HCC_WIDE_CARD))
1186 dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
1187 }
1188 }
1189
1190
1191
1192
1193
1194
1195
1196 static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
1197 {
1198 struct AdapterCtlBlk *acb =
1199 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1200 dprintkl(KERN_INFO,
1201 "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
1202 cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
1203
1204 if (timer_pending(&acb->waiting_timer))
1205 del_timer(&acb->waiting_timer);
1206
1207
1208
1209
1210 DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
1211 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
1212 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
1213 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
1214
1215 reset_scsi_bus(acb);
1216 udelay(500);
1217
1218
1219 acb->last_reset =
1220 jiffies + 3 * HZ / 2 +
1221 HZ * acb->eeprom.delay_time;
1222
1223
1224
1225
1226
1227 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
1228 clear_fifo(acb, "eh_bus_reset");
1229
1230 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
1231 set_basic_config(acb);
1232
1233 reset_dev_param(acb);
1234 doing_srb_done(acb, DID_RESET, cmd, 0);
1235 acb->active_dcb = NULL;
1236 acb->acb_flag = 0;
1237 waiting_process_next(acb);
1238
1239 return SUCCESS;
1240 }
1241
1242 static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
1243 {
1244 int rc;
1245
1246 spin_lock_irq(cmd->device->host->host_lock);
1247 rc = __dc395x_eh_bus_reset(cmd);
1248 spin_unlock_irq(cmd->device->host->host_lock);
1249
1250 return rc;
1251 }
1252
1253
1254
1255
1256
1257
1258 static int dc395x_eh_abort(struct scsi_cmnd *cmd)
1259 {
1260
1261
1262
1263
1264 struct AdapterCtlBlk *acb =
1265 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1266 struct DeviceCtlBlk *dcb;
1267 struct ScsiReqBlk *srb;
1268 dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
1269 cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
1270
1271 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1272 if (!dcb) {
1273 dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
1274 return FAILED;
1275 }
1276
1277 srb = find_cmd(cmd, &dcb->srb_waiting_list);
1278 if (srb) {
1279 list_del(&srb->list);
1280 pci_unmap_srb_sense(acb, srb);
1281 pci_unmap_srb(acb, srb);
1282 free_tag(dcb, srb);
1283 list_add_tail(&srb->list, &acb->srb_free_list);
1284 dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
1285 cmd->result = DID_ABORT << 16;
1286 return SUCCESS;
1287 }
1288 srb = find_cmd(cmd, &dcb->srb_going_list);
1289 if (srb) {
1290 dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
1291
1292 } else {
1293 dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
1294 }
1295 return FAILED;
1296 }
1297
1298
1299
1300 static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
1301 struct ScsiReqBlk *srb)
1302 {
1303 u8 *ptr = srb->msgout_buf + srb->msg_count;
1304 if (srb->msg_count > 1) {
1305 dprintkl(KERN_INFO,
1306 "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
1307 srb->msg_count, srb->msgout_buf[0],
1308 srb->msgout_buf[1]);
1309 return;
1310 }
1311 if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
1312 dcb->sync_offset = 0;
1313 dcb->min_nego_period = 200 >> 2;
1314 } else if (dcb->sync_offset == 0)
1315 dcb->sync_offset = SYNC_NEGO_OFFSET;
1316
1317 *ptr++ = MSG_EXTENDED;
1318 *ptr++ = 3;
1319 *ptr++ = EXTENDED_SDTR;
1320 *ptr++ = dcb->min_nego_period;
1321 *ptr++ = dcb->sync_offset;
1322 srb->msg_count += 5;
1323 srb->state |= SRB_DO_SYNC_NEGO;
1324 }
1325
1326
1327
1328 static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
1329 struct ScsiReqBlk *srb)
1330 {
1331 u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
1332 (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
1333 u8 *ptr = srb->msgout_buf + srb->msg_count;
1334 if (srb->msg_count > 1) {
1335 dprintkl(KERN_INFO,
1336 "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
1337 srb->msg_count, srb->msgout_buf[0],
1338 srb->msgout_buf[1]);
1339 return;
1340 }
1341 *ptr++ = MSG_EXTENDED;
1342 *ptr++ = 2;
1343 *ptr++ = EXTENDED_WDTR;
1344 *ptr++ = wide;
1345 srb->msg_count += 4;
1346 srb->state |= SRB_DO_WIDE_NEGO;
1347 }
1348
1349
1350 #if 0
1351
1352
1353 void selection_timeout_missed(unsigned long ptr);
1354
1355 static void selto_timer(struct AdapterCtlBlk *acb)
1356 {
1357 if (timer_pending(&acb->selto_timer))
1358 return;
1359 acb->selto_timer.function = selection_timeout_missed;
1360 acb->selto_timer.data = (unsigned long) acb;
1361 if (time_before
1362 (jiffies + HZ, acb->last_reset + HZ / 2))
1363 acb->selto_timer.expires =
1364 acb->last_reset + HZ / 2 + 1;
1365 else
1366 acb->selto_timer.expires = jiffies + HZ + 1;
1367 add_timer(&acb->selto_timer);
1368 }
1369
1370
1371 void selection_timeout_missed(unsigned long ptr)
1372 {
1373 unsigned long flags;
1374 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
1375 struct ScsiReqBlk *srb;
1376 dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
1377 if (!acb->active_dcb || !acb->active_dcb->active_srb) {
1378 dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
1379 return;
1380 }
1381 DC395x_LOCK_IO(acb->scsi_host, flags);
1382 srb = acb->active_dcb->active_srb;
1383 disconnect(acb);
1384 DC395x_UNLOCK_IO(acb->scsi_host, flags);
1385 }
1386 #endif
1387
1388
1389 static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1390 struct ScsiReqBlk* srb)
1391 {
1392 u16 s_stat2, return_code;
1393 u8 s_stat, scsicommand, i, identify_message;
1394 u8 *ptr;
1395 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
1396 dcb->target_id, dcb->target_lun, srb);
1397
1398 srb->tag_number = TAG_NONE;
1399
1400 s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
1401 s_stat2 = 0;
1402 s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
1403 #if 1
1404 if (s_stat & 0x20 ) {
1405 dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
1406 s_stat, s_stat2);
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418 return 1;
1419 }
1420 #endif
1421 if (acb->active_dcb) {
1422 dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a"
1423 "command while another command (0x%p) is active.",
1424 srb->cmd,
1425 acb->active_dcb->active_srb ?
1426 acb->active_dcb->active_srb->cmd : 0);
1427 return 1;
1428 }
1429 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
1430 dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
1431 return 1;
1432 }
1433
1434
1435 if (time_before(jiffies, acb->last_reset - HZ / 2)) {
1436 dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
1437 return 1;
1438 }
1439
1440
1441 clear_fifo(acb, "start_scsi");
1442 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
1443 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
1444 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
1445 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
1446 srb->scsi_phase = PH_BUS_FREE;
1447
1448 identify_message = dcb->identify_msg;
1449
1450
1451 if (srb->flag & AUTO_REQSENSE)
1452 identify_message &= 0xBF;
1453
1454 if (((srb->cmd->cmnd[0] == INQUIRY)
1455 || (srb->cmd->cmnd[0] == REQUEST_SENSE)
1456 || (srb->flag & AUTO_REQSENSE))
1457 && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
1458 && !(dcb->sync_mode & WIDE_NEGO_DONE))
1459 || ((dcb->sync_mode & SYNC_NEGO_ENABLE)
1460 && !(dcb->sync_mode & SYNC_NEGO_DONE)))
1461 && (dcb->target_lun == 0)) {
1462 srb->msgout_buf[0] = identify_message;
1463 srb->msg_count = 1;
1464 scsicommand = SCMD_SEL_ATNSTOP;
1465 srb->state = SRB_MSGOUT;
1466 #ifndef SYNC_FIRST
1467 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1468 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1469 build_wdtr(acb, dcb, srb);
1470 goto no_cmd;
1471 }
1472 #endif
1473 if (dcb->sync_mode & SYNC_NEGO_ENABLE
1474 && dcb->inquiry7 & SCSI_INQ_SYNC) {
1475 build_sdtr(acb, dcb, srb);
1476 goto no_cmd;
1477 }
1478 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1479 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1480 build_wdtr(acb, dcb, srb);
1481 goto no_cmd;
1482 }
1483 srb->msg_count = 0;
1484 }
1485
1486 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);
1487
1488 scsicommand = SCMD_SEL_ATN;
1489 srb->state = SRB_START_;
1490 #ifndef DC395x_NO_TAGQ
1491 if ((dcb->sync_mode & EN_TAG_QUEUEING)
1492 && (identify_message & 0xC0)) {
1493
1494 u32 tag_mask = 1;
1495 u8 tag_number = 0;
1496 while (tag_mask & dcb->tag_mask
1497 && tag_number < dcb->max_command) {
1498 tag_mask = tag_mask << 1;
1499 tag_number++;
1500 }
1501 if (tag_number >= dcb->max_command) {
1502 dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
1503 "Out of tags target=<%02i-%i>)\n",
1504 srb->cmd, srb->cmd->device->id,
1505 (u8)srb->cmd->device->lun);
1506 srb->state = SRB_READY;
1507 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1508 DO_HWRESELECT);
1509 return 1;
1510 }
1511
1512 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
1513 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
1514 dcb->tag_mask |= tag_mask;
1515 srb->tag_number = tag_number;
1516 scsicommand = SCMD_SEL_ATN3;
1517 srb->state = SRB_START_;
1518 }
1519 #endif
1520
1521
1522 dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
1523 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
1524 srb->cmd->cmnd[0], srb->tag_number);
1525 if (srb->flag & AUTO_REQSENSE) {
1526 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
1527 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1528 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1529 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1530 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
1531 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1532 } else {
1533 ptr = (u8 *)srb->cmd->cmnd;
1534 for (i = 0; i < srb->cmd->cmd_len; i++)
1535 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
1536 }
1537 no_cmd:
1538 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1539 DO_HWRESELECT | DO_DATALATCH);
1540 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
1541
1542
1543
1544
1545
1546 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
1547 srb->cmd, dcb->target_id, dcb->target_lun);
1548 srb->state = SRB_READY;
1549 free_tag(dcb, srb);
1550 srb->msg_count = 0;
1551 return_code = 1;
1552
1553 } else {
1554
1555
1556
1557
1558 srb->scsi_phase = PH_BUS_FREE;
1559 dcb->active_srb = srb;
1560 acb->active_dcb = dcb;
1561 return_code = 0;
1562
1563 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1564 DO_DATALATCH | DO_HWRESELECT);
1565
1566 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
1567 }
1568 return return_code;
1569 }
1570
1571
1572 #define DC395x_ENABLE_MSGOUT \
1573 DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
1574 srb->state |= SRB_MSGOUT
1575
1576
1577
1578 static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
1579 struct ScsiReqBlk *srb)
1580 {
1581 srb->msgout_buf[0] = ABORT;
1582 srb->msg_count = 1;
1583 DC395x_ENABLE_MSGOUT;
1584 srb->state &= ~SRB_MSGIN;
1585 srb->state |= SRB_MSGOUT;
1586 }
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596 static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
1597 u16 scsi_status)
1598 {
1599 struct DeviceCtlBlk *dcb;
1600 struct ScsiReqBlk *srb;
1601 u16 phase;
1602 u8 scsi_intstatus;
1603 unsigned long flags;
1604 void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
1605 u16 *);
1606
1607 DC395x_LOCK_IO(acb->scsi_host, flags);
1608
1609
1610 scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
1611 if ((scsi_status & 0x2007) == 0x2002)
1612 dprintkl(KERN_DEBUG,
1613 "COP after COP completed? %04x\n", scsi_status);
1614 if (debug_enabled(DBG_KG)) {
1615 if (scsi_intstatus & INT_SELTIMEOUT)
1616 dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
1617 }
1618
1619
1620 if (timer_pending(&acb->selto_timer))
1621 del_timer(&acb->selto_timer);
1622
1623 if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
1624 disconnect(acb);
1625 goto out_unlock;
1626 }
1627 if (scsi_intstatus & INT_RESELECTED) {
1628 reselect(acb);
1629 goto out_unlock;
1630 }
1631 if (scsi_intstatus & INT_SELECT) {
1632 dprintkl(KERN_INFO, "Host does not support target mode!\n");
1633 goto out_unlock;
1634 }
1635 if (scsi_intstatus & INT_SCSIRESET) {
1636 scsi_reset_detect(acb);
1637 goto out_unlock;
1638 }
1639 if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
1640 dcb = acb->active_dcb;
1641 if (!dcb) {
1642 dprintkl(KERN_DEBUG,
1643 "Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
1644 scsi_status, scsi_intstatus);
1645 goto out_unlock;
1646 }
1647 srb = dcb->active_srb;
1648 if (dcb->flag & ABORT_DEV_) {
1649 dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
1650 enable_msgout_abort(acb, srb);
1651 }
1652
1653
1654 phase = (u16)srb->scsi_phase;
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669 dc395x_statev = dc395x_scsi_phase0[phase];
1670 dc395x_statev(acb, srb, &scsi_status);
1671
1672
1673
1674
1675
1676
1677 srb->scsi_phase = scsi_status & PHASEMASK;
1678 phase = (u16)scsi_status & PHASEMASK;
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692 dc395x_statev = dc395x_scsi_phase1[phase];
1693 dc395x_statev(acb, srb, &scsi_status);
1694 }
1695 out_unlock:
1696 DC395x_UNLOCK_IO(acb->scsi_host, flags);
1697 }
1698
1699
1700 static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
1701 {
1702 struct AdapterCtlBlk *acb = dev_id;
1703 u16 scsi_status;
1704 u8 dma_status;
1705 irqreturn_t handled = IRQ_NONE;
1706
1707
1708
1709
1710 scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
1711 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
1712 if (scsi_status & SCSIINTERRUPT) {
1713
1714 dc395x_handle_interrupt(acb, scsi_status);
1715 handled = IRQ_HANDLED;
1716 }
1717 else if (dma_status & 0x20) {
1718
1719 dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
1720 #if 0
1721 dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
1722 if (acb->active_dcb) {
1723 acb->active_dcb-> flag |= ABORT_DEV_;
1724 if (acb->active_dcb->active_srb)
1725 enable_msgout_abort(acb, acb->active_dcb->active_srb);
1726 }
1727 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
1728 #else
1729 dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
1730 acb = NULL;
1731 #endif
1732 handled = IRQ_HANDLED;
1733 }
1734
1735 return handled;
1736 }
1737
1738
1739 static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1740 u16 *pscsi_status)
1741 {
1742 dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
1743 if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
1744 *pscsi_status = PH_BUS_FREE;
1745
1746 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1747 srb->state &= ~SRB_MSGOUT;
1748 }
1749
1750
1751 static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1752 u16 *pscsi_status)
1753 {
1754 u16 i;
1755 u8 *ptr;
1756 dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);
1757
1758 clear_fifo(acb, "msgout_phase1");
1759 if (!(srb->state & SRB_MSGOUT)) {
1760 srb->state |= SRB_MSGOUT;
1761 dprintkl(KERN_DEBUG,
1762 "msgout_phase1: (0x%p) Phase unexpected\n",
1763 srb->cmd);
1764 }
1765 if (!srb->msg_count) {
1766 dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
1767 srb->cmd);
1768 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
1769 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1770 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1771 return;
1772 }
1773 ptr = (u8 *)srb->msgout_buf;
1774 for (i = 0; i < srb->msg_count; i++)
1775 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
1776 srb->msg_count = 0;
1777 if (srb->msgout_buf[0] == MSG_ABORT)
1778 srb->state = SRB_ABORT_SENT;
1779
1780 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1781 }
1782
1783
1784 static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1785 u16 *pscsi_status)
1786 {
1787 dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
1788 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1789 }
1790
1791
1792 static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1793 u16 *pscsi_status)
1794 {
1795 struct DeviceCtlBlk *dcb;
1796 u8 *ptr;
1797 u16 i;
1798 dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
1799
1800 clear_fifo(acb, "command_phase1");
1801 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
1802 if (!(srb->flag & AUTO_REQSENSE)) {
1803 ptr = (u8 *)srb->cmd->cmnd;
1804 for (i = 0; i < srb->cmd->cmd_len; i++) {
1805 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr);
1806 ptr++;
1807 }
1808 } else {
1809 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
1810 dcb = acb->active_dcb;
1811
1812 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1813 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1814 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1815 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
1816 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1817 }
1818 srb->state |= SRB_COMMAND;
1819
1820 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1821
1822 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1823 }
1824
1825
1826
1827
1828
1829
1830 static void sg_verify_length(struct ScsiReqBlk *srb)
1831 {
1832 if (debug_enabled(DBG_SG)) {
1833 unsigned len = 0;
1834 unsigned idx = srb->sg_index;
1835 struct SGentry *psge = srb->segment_x + idx;
1836 for (; idx < srb->sg_count; psge++, idx++)
1837 len += psge->length;
1838 if (len != srb->total_xfer_length)
1839 dprintkdbg(DBG_SG,
1840 "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
1841 srb->total_xfer_length, len);
1842 }
1843 }
1844
1845
1846
1847
1848
1849
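/*
 * Adjust the SG list after a partial transfer: 'left' is what still remains
 * of total_xfer_length.  Fully transferred entries are skipped, the first
 * partially transferred entry has its address advanced and its length
 * reduced, and the table is synced back for the DMA engine.  E.g. after 2048
 * of an 8192-byte transfer over two 4096-byte segments, the first entry
 * becomes address+2048 / length 2048 and total_xfer_length drops to 6144.
 */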
1850 static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
1851 {
1852 u8 idx;
1853 u32 xferred = srb->total_xfer_length - left;
1854 struct SGentry *psge = srb->segment_x + srb->sg_index;
1855
1856 dprintkdbg(DBG_0,
1857 "sg_update_list: Transferred %i of %i bytes, %i remain\n",
1858 xferred, srb->total_xfer_length, left);
1859 if (xferred == 0) {
1860
1861 return;
1862 }
1863
1864 sg_verify_length(srb);
1865 srb->total_xfer_length = left;
1866 for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
1867 if (xferred >= psge->length) {
1868
1869 xferred -= psge->length;
1870 } else {
1871
1872 dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev,
1873 srb->sg_bus_addr, SEGMENTX_LEN,
1874 DMA_TO_DEVICE);
1875 psge->length -= xferred;
1876 psge->address += xferred;
1877 srb->sg_index = idx;
1878 dma_sync_single_for_device(&srb->dcb->acb->dev->dev,
1879 srb->sg_bus_addr, SEGMENTX_LEN,
1880 DMA_TO_DEVICE);
1881 break;
1882 }
1883 psge++;
1884 }
1885 sg_verify_length(srb);
1886 }
1887
1888
1889
1890
1891
1892
1893
1894
1895 static void sg_subtract_one(struct ScsiReqBlk *srb)
1896 {
1897 sg_update_list(srb, srb->total_xfer_length - 1);
1898 }
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909 static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
1910 struct ScsiReqBlk *srb)
1911 {
1912
1913 if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) {
1914 if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
1915 clear_fifo(acb, "cleanup/in");
1916 if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
1917 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
1918 } else {
1919 if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
1920 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
1921 if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
1922 clear_fifo(acb, "cleanup/out");
1923 }
1924 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1925 }
1926
1927
1928
1929
1930
1931
1932 #define DC395x_LASTPIO 4
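/*
 * Transfers of up to DC395x_LASTPIO bytes are done by PIO through the SCSI
 * FIFO in data_io_transfer()/data_in_phase0(); anything larger is handed to
 * the DMA engine.
 */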
1933
1934
1935 static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1936 u16 *pscsi_status)
1937 {
1938 struct DeviceCtlBlk *dcb = srb->dcb;
1939 u16 scsi_status = *pscsi_status;
1940 u32 d_left_counter = 0;
1941 dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
1942 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956 dprintkdbg(DBG_PIO, "data_out_phase0: "
1957 "DMA{fifocnt=0x%02x fifostat=0x%02x} "
1958 "SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
1959 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
1960 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
1961 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
1962 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
1963 srb->total_xfer_length);
1964 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);
1965
1966 if (!(srb->state & SRB_XFERPAD)) {
1967 if (scsi_status & PARITYERROR)
1968 srb->status |= PARITY_ERROR;
1969
1970
1971
1972
1973
1974
1975
1976 if (!(scsi_status & SCSIXFERDONE)) {
1977
1978
1979
1980
1981 d_left_counter =
1982 (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
1983 0x1F);
1984 if (dcb->sync_period & WIDE_SYNC)
1985 d_left_counter <<= 1;
1986
1987 dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
1988 "SCSI{fifocnt=0x%02x cnt=0x%08x} "
1989 "DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
1990 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
1991 (dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
1992 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
1993 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
1994 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
1995 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
1996 DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
1997 }
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007 if (srb->total_xfer_length > DC395x_LASTPIO)
2008 d_left_counter +=
2009 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
2010
2011
2012
2013
2014 if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
2015 && scsi_bufflen(srb->cmd) % 2) {
2016 d_left_counter = 0;
2017 dprintkl(KERN_INFO,
2018 "data_out_phase0: Discard 1 byte (0x%02x)\n",
2019 scsi_status);
2020 }
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031 if (d_left_counter == 0) {
2032 srb->total_xfer_length = 0;
2033 } else {
2034
2035
2036
2037
2038
2039 long oldxferred =
2040 srb->total_xfer_length - d_left_counter;
2041 const int diff =
2042 (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
2043 sg_update_list(srb, d_left_counter);
2044
2045 if ((srb->segment_x[srb->sg_index].length ==
2046 diff && scsi_sg_count(srb->cmd))
2047 || ((oldxferred & ~PAGE_MASK) ==
2048 (PAGE_SIZE - diff))
2049 ) {
2050 dprintkl(KERN_INFO, "data_out_phase0: "
2051 "Work around chip bug (%i)?\n", diff);
2052 d_left_counter =
2053 srb->total_xfer_length - diff;
2054 sg_update_list(srb, d_left_counter);
2055
2056
2057
2058
2059 }
2060 }
2061 }
2062 if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
2063 cleanup_after_transfer(acb, srb);
2064 }
2065 }
2066
2067
2068 static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2069 u16 *pscsi_status)
2070 {
2071 dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
2072 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2073 clear_fifo(acb, "data_out_phase1");
2074
2075 data_io_transfer(acb, srb, XFERDATAOUT);
2076 }
2077
2078 static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2079 u16 *pscsi_status)
2080 {
2081 u16 scsi_status = *pscsi_status;
2082
2083 dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
2084 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099 if (!(srb->state & SRB_XFERPAD)) {
2100 u32 d_left_counter;
2101 unsigned int sc, fc;
2102
2103 if (scsi_status & PARITYERROR) {
2104 dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
2105 "Parity Error\n", srb->cmd);
2106 srb->status |= PARITY_ERROR;
2107 }
2108
2109
2110
2111
2112
2113
2114 if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
2115 #if 0
2116 int ctr = 6000000;
2117 dprintkl(KERN_DEBUG,
2118 "DIP0: Wait for DMA FIFO to flush ...\n");
2119
2120
2121
2122 while (!
2123 (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
2124 0x80) && --ctr);
2125 if (ctr < 6000000 - 1)
2126 dprintkl(KERN_DEBUG
2127 "DIP0: Had to wait for DMA ...\n");
2128 if (!ctr)
2129 dprintkl(KERN_ERR,
2130 "Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
2131
2132 #endif
2133 dprintkdbg(DBG_KG, "data_in_phase0: "
2134 "DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
2135 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
2136 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
2137 }
2138
2139 sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
2140 fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
2141 d_left_counter = sc + ((fc & 0x1f)
2142 << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
2143 0));
2144 dprintkdbg(DBG_KG, "data_in_phase0: "
2145 "SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
2146 "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
2147 "Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
2148 fc,
2149 (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
2150 sc,
2151 fc,
2152 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
2153 DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
2154 srb->total_xfer_length, d_left_counter);
2155 #if DC395x_LASTPIO
2156
2157 if (d_left_counter
2158 && srb->total_xfer_length <= DC395x_LASTPIO) {
2159 size_t left_io = srb->total_xfer_length;
2160
2161
2162
2163 dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) "
2164 "for remaining %i bytes:",
2165 fc & 0x1f,
2166 (srb->dcb->sync_period & WIDE_SYNC) ?
2167 "words" : "bytes",
2168 srb->total_xfer_length);
2169 if (srb->dcb->sync_period & WIDE_SYNC)
2170 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
2171 CFG2_WIDEFIFO);
2172 while (left_io) {
2173 unsigned char *virt, *base = NULL;
2174 unsigned long flags = 0;
2175 size_t len = left_io;
2176 size_t offset = srb->request_length - left_io;
2177
2178 local_irq_save(flags);
2179
2180
2181 base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
2182 srb->sg_count, &offset, &len);
2183 virt = base + offset;
2184
2185 left_io -= len;
2186
2187 while (len) {
2188 u8 byte;
2189 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2190 *virt++ = byte;
2191
2192 if (debug_enabled(DBG_PIO))
2193 printk(" %02x", byte);
2194
2195 d_left_counter--;
2196 sg_subtract_one(srb);
2197
2198 len--;
2199
2200 fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
2201
2202 if (fc == 0x40) {
2203 left_io = 0;
2204 break;
2205 }
2206 }
2207
2208 WARN_ON((fc != 0x40) == !d_left_counter);
2209
2210 if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
2211
2212 if (srb->total_xfer_length > 0) {
2213 u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2214
2215 *virt++ = byte;
2216 srb->total_xfer_length--;
2217 if (debug_enabled(DBG_PIO))
2218 printk(" %02x", byte);
2219 }
2220
2221 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2222 }
2223
2224 scsi_kunmap_atomic_sg(base);
2225 local_irq_restore(flags);
2226 }
2227
2228
2229 if (debug_enabled(DBG_PIO))
2230 printk("\n");
2231 }
2232 #endif
2233
2234 #if 0
2235
2236
2237
2238
2239 if (!(scsi_status & SCSIXFERDONE)) {
2240
2241
2242
2243
2244 d_left_counter =
2245 (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
2246 0x1F);
2247 if (srb->dcb->sync_period & WIDE_SYNC)
2248 d_left_counter <<= 1;
2249
2250
2251
2252
2253
2254 }
2255 #endif
2256
2257 if (d_left_counter == 0
2258 || (scsi_status & SCSIXFERCNT_2_ZERO)) {
2259 #if 0
2260 int ctr = 6000000;
2261 u8 TempDMAstatus;
2262 do {
2263 TempDMAstatus =
2264 DC395x_read8(acb, TRM_S1040_DMA_STATUS);
2265 } while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
2266 if (!ctr)
2267 dprintkl(KERN_ERR,
2268 "Deadlock in DataInPhase0 waiting for DMA!!\n");
2269 srb->total_xfer_length = 0;
2270 #endif
2271 srb->total_xfer_length = d_left_counter;
2272 } else {
2273
2274
2275
2276
2277
2278
2279
2280
2281 sg_update_list(srb, d_left_counter);
2282 }
2283 }
2284
2285 if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
2286 cleanup_after_transfer(acb, srb);
2287 }
2288 }
2289
2290
2291 static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2292 u16 *pscsi_status)
2293 {
2294 dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
2295 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2296 data_io_transfer(acb, srb, XFERDATAIN);
2297 }
2298
2299
2300 static void data_io_transfer(struct AdapterCtlBlk *acb,
2301 struct ScsiReqBlk *srb, u16 io_dir)
2302 {
2303 struct DeviceCtlBlk *dcb = srb->dcb;
2304 u8 bval;
2305 dprintkdbg(DBG_0,
2306 "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
2307 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
2308 ((io_dir & DMACMD_DIR) ? 'r' : 'w'),
2309 srb->total_xfer_length, srb->sg_index, srb->sg_count);
2310 if (srb == acb->tmp_srb)
2311 dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
2312 if (srb->sg_index >= srb->sg_count) {
2313
2314 return;
2315 }
2316
2317 if (srb->total_xfer_length > DC395x_LASTPIO) {
2318 u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
2319
2320
2321
2322
2323 if (dma_status & XFERPENDING) {
2324 dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
2325 "Expect trouble!\n");
2326 dump_register_info(acb, dcb, srb);
2327 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
2328 }
2329
2330
2331
2332
2333
2334 srb->state |= SRB_DATA_XFER;
2335 DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
2336 if (scsi_sg_count(srb->cmd)) {
2337 io_dir |= DMACMD_SG;
2338 DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
2339 srb->sg_bus_addr +
2340 sizeof(struct SGentry) *
2341 srb->sg_index);
2342
2343 DC395x_write32(acb, TRM_S1040_DMA_XCNT,
2344 ((u32)(srb->sg_count -
2345 srb->sg_index) << 3));
2346 } else {
2347 io_dir &= ~DMACMD_SG;
2348 DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
2349 srb->segment_x[0].address);
2350 DC395x_write32(acb, TRM_S1040_DMA_XCNT,
2351 srb->segment_x[0].length);
2352 }
2353
2354 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
2355 srb->total_xfer_length);
2356 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2357 if (io_dir & DMACMD_DIR) {
2358 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2359 SCMD_DMA_IN);
2360 DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
2361 } else {
2362 DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
2363 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2364 SCMD_DMA_OUT);
2365 }
2366
2367 }
2368 #if DC395x_LASTPIO
2369 else if (srb->total_xfer_length > 0) {
2370
2371
2372
2373
2374 srb->state |= SRB_DATA_XFER;
2375
2376 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
2377 srb->total_xfer_length);
2378 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2379 if (io_dir & DMACMD_DIR) {
2380 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2381 SCMD_FIFO_IN);
2382 } else {
2383 int ln = srb->total_xfer_length;
2384 size_t left_io = srb->total_xfer_length;
2385
2386 if (srb->dcb->sync_period & WIDE_SYNC)
2387 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
2388 CFG2_WIDEFIFO);
2389
2390 while (left_io) {
2391 unsigned char *virt, *base = NULL;
2392 unsigned long flags = 0;
2393 size_t len = left_io;
2394 size_t offset = srb->request_length - left_io;
2395
2396 local_irq_save(flags);
2397
2398 base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
2399 srb->sg_count, &offset, &len);
2400 virt = base + offset;
2401
2402 left_io -= len;
2403
2404 while (len--) {
2405 if (debug_enabled(DBG_PIO))
2406 printk(" %02x", *virt);
2407
2408 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++);
2409
2410 sg_subtract_one(srb);
2411 }
2412
2413 scsi_kunmap_atomic_sg(base);
2414 local_irq_restore(flags);
2415 }
2416 if (srb->dcb->sync_period & WIDE_SYNC) {
2417 if (ln % 2) {
2418 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
2419 if (debug_enabled(DBG_PIO))
2420 printk(" |00");
2421 }
2422 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2423 }
2424
2425 if (debug_enabled(DBG_PIO))
2426 printk("\n");
2427 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2428 SCMD_FIFO_OUT);
2429 }
2430 }
2431 #endif
2432 else {
2433 u8 data = 0, data2 = 0;
2434 if (srb->sg_count) {
2435 srb->adapter_status = H_OVER_UNDER_RUN;
2436 srb->status |= OVER_RUN;
2437 }
2438
2439
2440
2441
2442
2443 if (dcb->sync_period & WIDE_SYNC) {
2444 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2);
2445 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
2446 CFG2_WIDEFIFO);
2447 if (io_dir & DMACMD_DIR) {
2448 data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2449 data2 = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2450 } else {
2451
2452
2453
2454 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
2455 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G');
2456 }
2457 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2458 } else {
2459 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
2460
2461
2462 if (io_dir & DMACMD_DIR)
2463 data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2464 else
2465 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
2466 }
2467 srb->state |= SRB_XFERPAD;
2468 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2469
2470 bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT;
2471 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval);
2472 }
2473 }
2474
2475
2476 static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2477 u16 *pscsi_status)
2478 {
2479 dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
2480 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2481 srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2482 srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2483 srb->state = SRB_COMPLETED;
2484 *pscsi_status = PH_BUS_FREE;
2485 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2486 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
2487 }
2488
2489
2490 static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2491 u16 *pscsi_status)
2492 {
2493 dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
2494 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2495 srb->state = SRB_STATUS;
2496 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2497 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
2498 }
2499
2500
2501
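/*
 * msgin_completed - true once msgbuf holds a complete SCSI message.
 *
 * Extended messages (first byte EXTENDED_MESSAGE) carry their payload
 * length in byte 1 and are complete at len == msgbuf[1] + 2; two-byte
 * messages (0x20..0x2f) need two bytes; everything else is one byte.
 *
 * Worked example, for illustration only: an incoming SDTR looks like
 * { 0x01, 0x03, 0x01, period, offset }, so this keeps returning 0 until
 * the fifth byte has been pulled from the FIFO.
 */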
2502 static inline u8 msgin_completed(u8 * msgbuf, u32 len)
2503 {
2504 if (*msgbuf == EXTENDED_MESSAGE) {
2505 if (len < 2)
2506 return 0;
2507 if (len < msgbuf[1] + 2)
2508 return 0;
2509 } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f)
2510 if (len < 2)
2511 return 0;
2512 return 1;
2513 }
2514
2515
2516 static inline void msgin_reject(struct AdapterCtlBlk *acb,
2517 struct ScsiReqBlk *srb)
2518 {
2519 srb->msgout_buf[0] = MESSAGE_REJECT;
2520 srb->msg_count = 1;
2521 DC395x_ENABLE_MSGOUT;
2522 srb->state &= ~SRB_MSGIN;
2523 srb->state |= SRB_MSGOUT;
2524 dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
2525 srb->msgin_buf[0],
2526 srb->dcb->target_id, srb->dcb->target_lun);
2527 }
2528
2529
2530 static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
2531 struct DeviceCtlBlk *dcb, u8 tag)
2532 {
2533 struct ScsiReqBlk *srb = NULL;
2534 struct ScsiReqBlk *i;
2535 	/* srb is still NULL at this point - don't dereference it in the trace */
2536 	dprintkdbg(DBG_0, "msgin_qtag: tag=%i\n", tag);
2537
2538 if (!(dcb->tag_mask & (1 << tag)))
2539 dprintkl(KERN_DEBUG,
2540 "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
2541 dcb->tag_mask, tag);
2542
2543 if (list_empty(&dcb->srb_going_list))
2544 goto mingx0;
2545 list_for_each_entry(i, &dcb->srb_going_list, list) {
2546 if (i->tag_number == tag) {
2547 srb = i;
2548 break;
2549 }
2550 }
2551 if (!srb)
2552 goto mingx0;
2553
2554 dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
2555 srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
2556 if (dcb->flag & ABORT_DEV_) {
2557
2558 enable_msgout_abort(acb, srb);
2559 }
2560
2561 if (!(srb->state & SRB_DISCONNECT))
2562 goto mingx0;
2563
2564 memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
2565 srb->state |= dcb->active_srb->state;
2566 srb->state |= SRB_DATA_XFER;
2567 dcb->active_srb = srb;
2568
2569 return srb;
2570
2571 mingx0:
2572 srb = acb->tmp_srb;
2573 srb->state = SRB_UNEXPECT_RESEL;
2574 dcb->active_srb = srb;
2575 srb->msgout_buf[0] = MSG_ABORT_TAG;
2576 srb->msg_count = 1;
2577 DC395x_ENABLE_MSGOUT;
2578 dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
2579 return srb;
2580 }
2581
2582
2583 static inline void reprogram_regs(struct AdapterCtlBlk *acb,
2584 struct DeviceCtlBlk *dcb)
2585 {
2586 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
2587 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
2588 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
2589 set_xfer_rate(acb, dcb);
2590 }
2591
2592
2593
2594 static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2595 {
2596 struct DeviceCtlBlk *dcb = srb->dcb;
2597 dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
2598 dcb->target_id, dcb->target_lun);
2599
2600 dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
2601 dcb->sync_mode |= SYNC_NEGO_DONE;
2602
2603 dcb->sync_offset = 0;
2604 dcb->min_nego_period = 200 >> 2;
2605 srb->state &= ~SRB_DO_SYNC_NEGO;
2606 reprogram_regs(acb, dcb);
2607 if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
2608 && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
2609 build_wdtr(acb, dcb, srb);
2610 DC395x_ENABLE_MSGOUT;
2611 dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
2612 }
2613 }
2614
2615
2616
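/*
 * msgin_set_sync - accept (or adjust) a target's SDTR proposal.
 *
 * msgin_buf[3] is the transfer period factor in units of 4 ns and
 * msgin_buf[4] the REQ/ACK offset.  The loop below walks clock_period[]
 * until it finds the first entry that is no faster than either the
 * target's request or our own minimum, then reports the resulting rate
 * as fact / min_nego_period MB/s, with fact = 500 for wide (two bytes
 * per transfer) and 250 for narrow.
 *
 * Rough example (illustrative numbers only): a period factor of 12
 * means 48 ns per transfer, i.e. about 250/12 ~= 20.8 MB/s on a narrow
 * bus and twice that when WIDE_SYNC is set.
 */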
2617 static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2618 {
2619 struct DeviceCtlBlk *dcb = srb->dcb;
2620 u8 bval;
2621 int fact;
2622 dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
2623 "(%02i.%01i MHz) Offset %i\n",
2624 dcb->target_id, srb->msgin_buf[3] << 2,
2625 (250 / srb->msgin_buf[3]),
2626 ((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
2627 srb->msgin_buf[4]);
2628
2629 if (srb->msgin_buf[4] > 15)
2630 srb->msgin_buf[4] = 15;
2631 if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
2632 dcb->sync_offset = 0;
2633 else if (dcb->sync_offset == 0)
2634 dcb->sync_offset = srb->msgin_buf[4];
2635 if (srb->msgin_buf[4] > dcb->sync_offset)
2636 srb->msgin_buf[4] = dcb->sync_offset;
2637 else
2638 dcb->sync_offset = srb->msgin_buf[4];
2639 bval = 0;
2640 while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
2641 || dcb->min_nego_period >
2642 clock_period[bval]))
2643 bval++;
2644 if (srb->msgin_buf[3] < clock_period[bval])
2645 dprintkl(KERN_INFO,
2646 "msgin_set_sync: Increase sync nego period to %ins\n",
2647 clock_period[bval] << 2);
2648 srb->msgin_buf[3] = clock_period[bval];
2649 dcb->sync_period &= 0xf0;
2650 dcb->sync_period |= ALT_SYNC | bval;
2651 dcb->min_nego_period = srb->msgin_buf[3];
2652
2653 if (dcb->sync_period & WIDE_SYNC)
2654 fact = 500;
2655 else
2656 fact = 250;
2657
2658 dprintkl(KERN_INFO,
2659 "Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
2660 dcb->target_id, (fact == 500) ? "Wide16" : "",
2661 dcb->min_nego_period << 2, dcb->sync_offset,
2662 (fact / dcb->min_nego_period),
2663 ((fact % dcb->min_nego_period) * 10 +
2664 dcb->min_nego_period / 2) / dcb->min_nego_period);
2665
2666 if (!(srb->state & SRB_DO_SYNC_NEGO)) {
2667
2668 dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
2669 srb->msgin_buf[3] << 2, srb->msgin_buf[4]);
2670
2671 memcpy(srb->msgout_buf, srb->msgin_buf, 5);
2672 srb->msg_count = 5;
2673 DC395x_ENABLE_MSGOUT;
2674 dcb->sync_mode |= SYNC_NEGO_DONE;
2675 } else {
2676 if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
2677 && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
2678 build_wdtr(acb, dcb, srb);
2679 DC395x_ENABLE_MSGOUT;
2680 dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
2681 }
2682 }
2683 srb->state &= ~SRB_DO_SYNC_NEGO;
2684 dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;
2685
2686 reprogram_regs(acb, dcb);
2687 }
2688
2689
2690 static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
2691 struct ScsiReqBlk *srb)
2692 {
2693 struct DeviceCtlBlk *dcb = srb->dcb;
2694 dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
2695
2696 dcb->sync_period &= ~WIDE_SYNC;
2697 dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
2698 dcb->sync_mode |= WIDE_NEGO_DONE;
2699 srb->state &= ~SRB_DO_WIDE_NEGO;
2700 reprogram_regs(acb, dcb);
2701 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2702 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2703 build_sdtr(acb, dcb, srb);
2704 DC395x_ENABLE_MSGOUT;
2705 dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
2706 }
2707 }
2708
2709 static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2710 {
2711 struct DeviceCtlBlk *dcb = srb->dcb;
2712 u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
2713 && acb->config & HCC_WIDE_CARD) ? 1 : 0;
2714 dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
2715
2716 if (srb->msgin_buf[3] > wide)
2717 srb->msgin_buf[3] = wide;
2718
2719 if (!(srb->state & SRB_DO_WIDE_NEGO)) {
2720 dprintkl(KERN_DEBUG,
2721 "msgin_set_wide: Wide nego initiated <%02i>\n",
2722 dcb->target_id);
2723 memcpy(srb->msgout_buf, srb->msgin_buf, 4);
2724 srb->msg_count = 4;
2725 srb->state |= SRB_DO_WIDE_NEGO;
2726 DC395x_ENABLE_MSGOUT;
2727 }
2728
2729 dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
2730 if (srb->msgin_buf[3] > 0)
2731 dcb->sync_period |= WIDE_SYNC;
2732 else
2733 dcb->sync_period &= ~WIDE_SYNC;
2734 srb->state &= ~SRB_DO_WIDE_NEGO;
2735
2736 dprintkdbg(DBG_1,
2737 "msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
2738 (8 << srb->msgin_buf[3]), dcb->target_id);
2739 reprogram_regs(acb, dcb);
2740 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2741 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2742 build_sdtr(acb, dcb, srb);
2743 DC395x_ENABLE_MSGOUT;
2744 dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
2745 }
2746 }
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
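/*
 * msgin_phase0 - collect and act on message-in bytes.
 *
 * One byte is read from the SCSI FIFO per invocation and appended to
 * msgin_buf.  Once msgin_completed() says the message is whole it is
 * dispatched: DISCONNECT, the three queue-tag messages, MESSAGE REJECT
 * (which falls back to async/narrow if a negotiation was pending),
 * extended SDTR/WDTR, and a few messages that are deliberately ignored.
 * Unknown messages are answered with MESSAGE REJECT, and in every case
 * the byte is acknowledged with SCMD_MSGACCEPT on the way out.
 */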
2761 static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2762 u16 *pscsi_status)
2763 {
2764 struct DeviceCtlBlk *dcb = acb->active_dcb;
2765 dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);
2766
2767 srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2768 if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
2769
2770 switch (srb->msgin_buf[0]) {
2771 case DISCONNECT:
2772 srb->state = SRB_DISCONNECT;
2773 break;
2774
2775 case SIMPLE_QUEUE_TAG:
2776 case HEAD_OF_QUEUE_TAG:
2777 case ORDERED_QUEUE_TAG:
2778 srb =
2779 msgin_qtag(acb, dcb,
2780 srb->msgin_buf[1]);
2781 break;
2782
2783 case MESSAGE_REJECT:
2784 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
2785 DO_CLRATN | DO_DATALATCH);
2786
2787 if (srb->state & SRB_DO_SYNC_NEGO) {
2788 msgin_set_async(acb, srb);
2789 break;
2790 }
2791
2792 if (srb->state & SRB_DO_WIDE_NEGO) {
2793 msgin_set_nowide(acb, srb);
2794 break;
2795 }
2796 enable_msgout_abort(acb, srb);
2797
2798 break;
2799
2800 case EXTENDED_MESSAGE:
2801
2802 if (srb->msgin_buf[1] == 3
2803 && srb->msgin_buf[2] == EXTENDED_SDTR) {
2804 msgin_set_sync(acb, srb);
2805 break;
2806 }
2807
2808 if (srb->msgin_buf[1] == 2
2809 && srb->msgin_buf[2] == EXTENDED_WDTR
2810 && srb->msgin_buf[3] <= 2) {
2811 msgin_set_wide(acb, srb);
2812 break;
2813 }
2814 msgin_reject(acb, srb);
2815 break;
2816
2817 case MSG_IGNOREWIDE:
2818
2819 dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
2820 break;
2821
2822 case COMMAND_COMPLETE:
2823
2824 break;
2825
2826 case SAVE_POINTERS:
2827
2828
2829
2830
2831 dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
2832 "SAVE POINTER rem=%i Ignore\n",
2833 srb->cmd, srb->total_xfer_length);
2834 break;
2835
2836 case RESTORE_POINTERS:
2837 dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
2838 break;
2839
2840 case ABORT:
2841 dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
2842 "<%02i-%i> ABORT msg\n",
2843 srb->cmd, dcb->target_id,
2844 dcb->target_lun);
2845 dcb->flag |= ABORT_DEV_;
2846 enable_msgout_abort(acb, srb);
2847 break;
2848
2849 default:
2850
2851 if (srb->msgin_buf[0] & IDENTIFY_BASE) {
2852 dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
2853 srb->msg_count = 1;
2854 srb->msgout_buf[0] = dcb->identify_msg;
2855 DC395x_ENABLE_MSGOUT;
2856 srb->state |= SRB_MSGOUT;
2857
2858 }
2859 msgin_reject(acb, srb);
2860 }
2861
2862
2863 srb->state &= ~SRB_MSGIN;
2864 acb->msg_len = 0;
2865 }
2866 *pscsi_status = PH_BUS_FREE;
2867 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2868 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
2869 }
2870
2871
2872 static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2873 u16 *pscsi_status)
2874 {
2875 dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
2876 clear_fifo(acb, "msgin_phase1");
2877 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
2878 if (!(srb->state & SRB_MSGIN)) {
2879 srb->state &= ~SRB_DISCONNECT;
2880 srb->state |= SRB_MSGIN;
2881 }
2882 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2883
2884 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN);
2885 }
2886
2887
2888 static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2889 u16 *pscsi_status)
2890 {
2891 }
2892
2893
2894 static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2895 u16 *pscsi_status)
2896 {
2897 }
2898
2899
2900 static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
2901 {
2902 struct DeviceCtlBlk *i;
2903
2904
2905 if (dcb->identify_msg & 0x07)
2906 return;
2907
2908 if (acb->scan_devices) {
2909 current_sync_offset = dcb->sync_offset;
2910 return;
2911 }
2912
2913 list_for_each_entry(i, &acb->dcb_list, list)
2914 if (i->target_id == dcb->target_id) {
2915 i->sync_period = dcb->sync_period;
2916 i->sync_offset = dcb->sync_offset;
2917 i->sync_mode = dcb->sync_mode;
2918 i->min_nego_period = dcb->min_nego_period;
2919 }
2920 }
2921
2922
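/*
 * disconnect - handle a bus-free / disconnect condition.
 *
 * The active SRB decides what happens next: an unexpected reselection
 * or a sent ABORT simply releases or fails the command; a command that
 * never got past selection is treated as a selection timeout and
 * retried (up to DC395x_MAX_RETRIES) from the waiting list; a properly
 * disconnected command just lets the next waiting command run; and a
 * completed command is handed to srb_done().
 */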
2923 static void disconnect(struct AdapterCtlBlk *acb)
2924 {
2925 struct DeviceCtlBlk *dcb = acb->active_dcb;
2926 struct ScsiReqBlk *srb;
2927
2928 if (!dcb) {
2929 dprintkl(KERN_ERR, "disconnect: No such device\n");
2930 udelay(500);
2931
2932 acb->last_reset =
2933 jiffies + HZ / 2 +
2934 HZ * acb->eeprom.delay_time;
2935 clear_fifo(acb, "disconnectEx");
2936 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
2937 return;
2938 }
2939 srb = dcb->active_srb;
2940 acb->active_dcb = NULL;
2941 dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);
2942
2943 srb->scsi_phase = PH_BUS_FREE;
2944 clear_fifo(acb, "disconnect");
2945 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
2946 if (srb->state & SRB_UNEXPECT_RESEL) {
2947 dprintkl(KERN_ERR,
2948 "disconnect: Unexpected reselection <%02i-%i>\n",
2949 dcb->target_id, dcb->target_lun);
2950 srb->state = 0;
2951 waiting_process_next(acb);
2952 } else if (srb->state & SRB_ABORT_SENT) {
2953 dcb->flag &= ~ABORT_DEV_;
2954 acb->last_reset = jiffies + HZ / 2 + 1;
2955 dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
2956 doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
2957 waiting_process_next(acb);
2958 } else {
2959 if ((srb->state & (SRB_START_ + SRB_MSGOUT))
2960 || !(srb->
2961 state & (SRB_DISCONNECT + SRB_COMPLETED))) {
2962
2963
2964
2965
2966
2967 if (srb->state != SRB_START_
2968 && srb->state != SRB_MSGOUT) {
2969 srb->state = SRB_READY;
2970 dprintkl(KERN_DEBUG,
2971 "disconnect: (0x%p) Unexpected\n",
2972 srb->cmd);
2973 srb->target_status = SCSI_STAT_SEL_TIMEOUT;
2974 goto disc1;
2975 } else {
2976
2977 dprintkdbg(DBG_KG, "disconnect: (0x%p) "
2978 "<%02i-%i> SelTO\n", srb->cmd,
2979 dcb->target_id, dcb->target_lun);
2980 if (srb->retry_count++ > DC395x_MAX_RETRIES
2981 || acb->scan_devices) {
2982 srb->target_status =
2983 SCSI_STAT_SEL_TIMEOUT;
2984 goto disc1;
2985 }
2986 free_tag(dcb, srb);
2987 list_move(&srb->list, &dcb->srb_waiting_list);
2988 dprintkdbg(DBG_KG,
2989 "disconnect: (0x%p) Retry\n",
2990 srb->cmd);
2991 waiting_set_timer(acb, HZ / 20);
2992 }
2993 } else if (srb->state & SRB_DISCONNECT) {
2994 u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
2995
2996
2997
2998 if (bval & 0x40) {
2999 dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
3000 " 0x%02x: ACK set! Other controllers?\n",
3001 bval);
3002
3003 } else
3004 waiting_process_next(acb);
3005 } else if (srb->state & SRB_COMPLETED) {
3006 disc1:
3007
3008
3009
3010 free_tag(dcb, srb);
3011 dcb->active_srb = NULL;
3012 srb->state = SRB_FREE;
3013 srb_done(acb, dcb, srb);
3014 }
3015 }
3016 }
3017
3018
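/*
 * reselect - a target has reselected us.
 *
 * The reselecting ID and the IDENTIFY LUN are read back from
 * TRM_S1040_SCSI_TARGETID.  If we were in the middle of selecting a
 * different device we lost arbitration, so that command is requeued and
 * retried later.  The matching DCB is then made active again and its
 * sync/offset settings are reprogrammed before the IDENTIFY message is
 * accepted.
 */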
3019 static void reselect(struct AdapterCtlBlk *acb)
3020 {
3021 struct DeviceCtlBlk *dcb = acb->active_dcb;
3022 struct ScsiReqBlk *srb = NULL;
3023 u16 rsel_tar_lun_id;
3024 u8 id, lun;
3025 u8 arblostflag = 0;
3026 dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);
3027
3028 clear_fifo(acb, "reselect");
3029
3030
3031 rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID);
3032 if (dcb) {
3033 srb = dcb->active_srb;
3034 if (!srb) {
3035 dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
3036 "but active_srb == NULL\n");
3037 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
3038 return;
3039 }
3040
3041 if (!acb->scan_devices) {
3042 dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
3043 "Arb lost but Resel win rsel=%i stat=0x%04x\n",
3044 srb->cmd, dcb->target_id,
3045 dcb->target_lun, rsel_tar_lun_id,
3046 DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
3047 arblostflag = 1;
3048
3049
3050 srb->state = SRB_READY;
3051 free_tag(dcb, srb);
3052 list_move(&srb->list, &dcb->srb_waiting_list);
3053 waiting_set_timer(acb, HZ / 20);
3054
3055
3056 }
3057 }
3058
3059 if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
3060 dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
3061 "Got %i!\n", rsel_tar_lun_id);
3062 id = rsel_tar_lun_id & 0xff;
3063 lun = (rsel_tar_lun_id >> 8) & 7;
3064 dcb = find_dcb(acb, id, lun);
3065 if (!dcb) {
3066 dprintkl(KERN_ERR, "reselect: From non existent device "
3067 "<%02i-%i>\n", id, lun);
3068 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
3069 return;
3070 }
3071 acb->active_dcb = dcb;
3072
3073 if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
3074 dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
3075 "disconnection? <%02i-%i>\n",
3076 dcb->target_id, dcb->target_lun);
3077
3078 	if (dcb->sync_mode & EN_TAG_QUEUEING) {
3079 srb = acb->tmp_srb;
3080 dcb->active_srb = srb;
3081 } else {
3082
3083 srb = dcb->active_srb;
3084 if (!srb || !(srb->state & SRB_DISCONNECT)) {
3085
3086
3087
3088 dprintkl(KERN_DEBUG,
3089 "reselect: w/o disconnected cmds <%02i-%i>\n",
3090 dcb->target_id, dcb->target_lun);
3091 srb = acb->tmp_srb;
3092 srb->state = SRB_UNEXPECT_RESEL;
3093 dcb->active_srb = srb;
3094 enable_msgout_abort(acb, srb);
3095 } else {
3096 if (dcb->flag & ABORT_DEV_) {
3097
3098 enable_msgout_abort(acb, srb);
3099 } else
3100 srb->state = SRB_DATA_XFER;
3101
3102 }
3103 }
3104 srb->scsi_phase = PH_BUS_FREE;
3105
3106
3107 dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
3108 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
3109 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
3110 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
3111 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
3112 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
3113
3114 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
3115 }
3116
3117
3118 static inline u8 tagq_blacklist(char *name)
3119 {
3120 #ifndef DC395x_NO_TAGQ
3121 #if 0
3122 u8 i;
3123 for (i = 0; i < BADDEVCNT; i++)
3124 if (memcmp(name, DC395x_baddevname1[i], 28) == 0)
3125 return 1;
3126 #endif
3127 return 0;
3128 #else
3129 return 1;
3130 #endif
3131 }
3132
3133
3134 static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
3135 {
3136
3137 if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) {
3138 if ((ptr->Flags & SCSI_INQ_CMDQUEUE)
3139 && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
3140
3141
3142
3143 !tagq_blacklist(((char *)ptr) + 8)) {
3144 if (dcb->max_command == 1)
3145 dcb->max_command =
3146 dcb->acb->tag_max_num;
3147 dcb->sync_mode |= EN_TAG_QUEUEING;
3148
3149 } else
3150 dcb->max_command = 1;
3151 }
3152 }
3153
3154
3155 static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3156 struct ScsiInqData *ptr)
3157 {
3158 u8 bval1 = ptr->DevType & SCSI_DEVTYPE;
3159 dcb->dev_type = bval1;
3160
3161 disc_tagq_set(dcb, ptr);
3162 }
3163
3164
3165
3166 static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
3167 {
3168 struct scsi_cmnd *cmd = srb->cmd;
3169 enum dma_data_direction dir = cmd->sc_data_direction;
3170
3171 	if (scsi_sg_count(cmd) && dir != DMA_NONE) {
3172
3173 dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
3174 srb->sg_bus_addr, SEGMENTX_LEN);
3175 dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
3176 DMA_TO_DEVICE);
3177 dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
3178 scsi_sg_count(cmd), scsi_bufflen(cmd));
3179
3180 scsi_dma_unmap(cmd);
3181 }
3182 }
3183
3184
3185
3186 static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
3187 struct ScsiReqBlk *srb)
3188 {
3189 if (!(srb->flag & AUTO_REQSENSE))
3190 return;
3191
3192 dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
3193 srb->segment_x[0].address);
3194 dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
3195 srb->segment_x[0].length, DMA_FROM_DEVICE);
3196
3197 srb->total_xfer_length = srb->xferred;
3198 srb->segment_x[0].address =
3199 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
3200 srb->segment_x[0].length =
3201 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
3202 }
3203
3204
3205
3206
3207
3208
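/*
 * srb_done - translate a finished SRB into a midlayer result.
 *
 * Auto request sense data is unmapped and turned into a CHECK_CONDITION
 * result; a fresh CHECK CONDITION triggers request_sense() and returns;
 * QUEUE FULL shrinks dcb->max_command and requeues the command;
 * selection timeouts become DID_NO_CONNECT.  Successful INQUIRY replies
 * are also peeked at to decide on wide/tagged-queueing support before
 * the command is completed and the next waiting one is started.
 */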
3209 static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3210 struct ScsiReqBlk *srb)
3211 {
3212 u8 tempcnt, status;
3213 struct scsi_cmnd *cmd = srb->cmd;
3214 enum dma_data_direction dir = cmd->sc_data_direction;
3215 int ckc_only = 1;
3216
3217 dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
3218 srb->cmd->device->id, (u8)srb->cmd->device->lun);
3219 dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
3220 srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
3221 		scsi_sglist(cmd));
3222 status = srb->target_status;
3223 if (srb->flag & AUTO_REQSENSE) {
3224 dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
3225 pci_unmap_srb_sense(acb, srb);
3226
3227
3228
3229 srb->flag &= ~AUTO_REQSENSE;
3230 srb->adapter_status = 0;
3231 srb->target_status = CHECK_CONDITION << 1;
3232 if (debug_enabled(DBG_1)) {
3233 switch (cmd->sense_buffer[2] & 0x0f) {
3234 case NOT_READY:
3235 dprintkl(KERN_DEBUG,
3236 "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3237 cmd->cmnd[0], dcb->target_id,
3238 dcb->target_lun, status, acb->scan_devices);
3239 break;
3240 case UNIT_ATTENTION:
3241 dprintkl(KERN_DEBUG,
3242 "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3243 cmd->cmnd[0], dcb->target_id,
3244 dcb->target_lun, status, acb->scan_devices);
3245 break;
3246 case ILLEGAL_REQUEST:
3247 dprintkl(KERN_DEBUG,
3248 "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3249 cmd->cmnd[0], dcb->target_id,
3250 dcb->target_lun, status, acb->scan_devices);
3251 break;
3252 case MEDIUM_ERROR:
3253 dprintkl(KERN_DEBUG,
3254 "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3255 cmd->cmnd[0], dcb->target_id,
3256 dcb->target_lun, status, acb->scan_devices);
3257 break;
3258 case HARDWARE_ERROR:
3259 dprintkl(KERN_DEBUG,
3260 "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3261 cmd->cmnd[0], dcb->target_id,
3262 dcb->target_lun, status, acb->scan_devices);
3263 break;
3264 }
3265 if (cmd->sense_buffer[7] >= 6)
3266 printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
3267 "(0x%08x 0x%08x)\n",
3268 cmd->sense_buffer[2], cmd->sense_buffer[12],
3269 cmd->sense_buffer[13],
3270 *((unsigned int *)(cmd->sense_buffer + 3)),
3271 *((unsigned int *)(cmd->sense_buffer + 8)));
3272 else
3273 printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
3274 cmd->sense_buffer[2],
3275 *((unsigned int *)(cmd->sense_buffer + 3)));
3276 }
3277
3278 if (status == (CHECK_CONDITION << 1)) {
3279 cmd->result = DID_BAD_TARGET << 16;
3280 goto ckc_e;
3281 }
3282 dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
3283
3284 		/*
3285 		 * The result is the same whether or not the transfer made
3286 		 * it past cmd->underflow: report CHECK_CONDITION together
3287 		 * with the sense data that was just gathered.
3288 		 */
3289 		cmd->result =
3290 		    MK_RES_LNX(DRIVER_SENSE, DID_OK,
3291 			       srb->end_message, CHECK_CONDITION);
3292 
3293 
3294
3295 goto ckc_e;
3296 }
3297
3298
3299 if (status) {
3300
3301
3302
3303 if (status_byte(status) == CHECK_CONDITION) {
3304 request_sense(acb, dcb, srb);
3305 return;
3306 } else if (status_byte(status) == QUEUE_FULL) {
3307 tempcnt = (u8)list_size(&dcb->srb_going_list);
3308 dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
3309 dcb->target_id, dcb->target_lun, tempcnt);
3310 if (tempcnt > 1)
3311 tempcnt--;
3312 dcb->max_command = tempcnt;
3313 free_tag(dcb, srb);
3314 list_move(&srb->list, &dcb->srb_waiting_list);
3315 waiting_set_timer(acb, HZ / 20);
3316 srb->adapter_status = 0;
3317 srb->target_status = 0;
3318 return;
3319 } else if (status == SCSI_STAT_SEL_TIMEOUT) {
3320 srb->adapter_status = H_SEL_TIMEOUT;
3321 srb->target_status = 0;
3322 cmd->result = DID_NO_CONNECT << 16;
3323 } else {
3324 srb->adapter_status = 0;
3325 SET_RES_DID(cmd->result, DID_ERROR);
3326 SET_RES_MSG(cmd->result, srb->end_message);
3327 SET_RES_TARGET(cmd->result, status);
3328
3329 }
3330 } else {
3331
3332
3333
3334 status = srb->adapter_status;
3335 if (status & H_OVER_UNDER_RUN) {
3336 srb->target_status = 0;
3337 SET_RES_DID(cmd->result, DID_OK);
3338 SET_RES_MSG(cmd->result, srb->end_message);
3339 } else if (srb->status & PARITY_ERROR) {
3340 SET_RES_DID(cmd->result, DID_PARITY);
3341 SET_RES_MSG(cmd->result, srb->end_message);
3342 } else {
3343
3344 srb->adapter_status = 0;
3345 srb->target_status = 0;
3346 SET_RES_DID(cmd->result, DID_OK);
3347 }
3348 }
3349
3350 ckc_only = 0;
3351
3352 ckc_e:
3353
3354 pci_unmap_srb(acb, srb);
3355
3356 if (cmd->cmnd[0] == INQUIRY) {
3357 unsigned char *base = NULL;
3358 struct ScsiInqData *ptr;
3359 unsigned long flags = 0;
3360 struct scatterlist* sg = scsi_sglist(cmd);
3361 size_t offset = 0, len = sizeof(struct ScsiInqData);
3362
3363 local_irq_save(flags);
3364 base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
3365 ptr = (struct ScsiInqData *)(base + offset);
3366
3367 if (!ckc_only && (cmd->result & RES_DID) == 0
3368 && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
3369 		    && dir != DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
3370 dcb->inquiry7 = ptr->Flags;
3371
3372
3373
3374 if ((cmd->result == (DID_OK << 16) ||
3375 status_byte(cmd->result) == CHECK_CONDITION)) {
3376 if (!dcb->init_tcq_flag) {
3377 add_dev(acb, dcb, ptr);
3378 dcb->init_tcq_flag = 1;
3379 }
3380 }
3381
3382 scsi_kunmap_atomic_sg(base);
3383 local_irq_restore(flags);
3384 }
3385
3386
3387 scsi_set_resid(cmd, srb->total_xfer_length);
3388
3389 cmd->SCp.this_residual = srb->total_xfer_length;
3390 cmd->SCp.buffers_residual = 0;
3391 if (debug_enabled(DBG_KG)) {
3392 if (srb->total_xfer_length)
3393 dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
3394 "cmnd=0x%02x Missed %i bytes\n",
3395 cmd, cmd->device->id, (u8)cmd->device->lun,
3396 cmd->cmnd[0], srb->total_xfer_length);
3397 }
3398
3399 if (srb != acb->tmp_srb) {
3400
3401 dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
3402 cmd, cmd->result);
3403 list_move_tail(&srb->list, &acb->srb_free_list);
3404 } else {
3405 dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
3406 }
3407
3408 cmd->scsi_done(cmd);
3409 waiting_process_next(acb);
3410 }
3411
3412
3413
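/*
 * doing_srb_done - flush every queued command with the given DID_ code.
 *
 * Both the going and the waiting list of each device are emptied, DMA
 * mappings are torn down and, if @force is set, the commands are
 * completed back to the midlayer.  Used by the reset and abort paths.
 */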
3414 static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
3415 struct scsi_cmnd *cmd, u8 force)
3416 {
3417 struct DeviceCtlBlk *dcb;
3418 dprintkl(KERN_INFO, "doing_srb_done: pids ");
3419
3420 list_for_each_entry(dcb, &acb->dcb_list, list) {
3421 struct ScsiReqBlk *srb;
3422 struct ScsiReqBlk *tmp;
3423 struct scsi_cmnd *p;
3424
3425 list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
3426 enum dma_data_direction dir;
3427 int result;
3428
3429 p = srb->cmd;
3430 dir = p->sc_data_direction;
3431 result = MK_RES(0, did_flag, 0, 0);
3432 printk("G:%p(%02i-%i) ", p,
3433 p->device->id, (u8)p->device->lun);
3434 list_del(&srb->list);
3435 free_tag(dcb, srb);
3436 list_add_tail(&srb->list, &acb->srb_free_list);
3437 p->result = result;
3438 pci_unmap_srb_sense(acb, srb);
3439 pci_unmap_srb(acb, srb);
3440 if (force) {
3441
3442
3443 p->scsi_done(p);
3444 }
3445 }
3446 if (!list_empty(&dcb->srb_going_list))
3447 dprintkl(KERN_DEBUG,
3448 "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
3449 dcb->target_id, dcb->target_lun);
3450 if (dcb->tag_mask)
3451 dprintkl(KERN_DEBUG,
3452 "tag_mask for <%02i-%i> should be empty, is %08x!\n",
3453 dcb->target_id, dcb->target_lun,
3454 dcb->tag_mask);
3455
3456
3457 list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
3458 int result;
3459 p = srb->cmd;
3460
3461 result = MK_RES(0, did_flag, 0, 0);
3462 printk("W:%p<%02i-%i>", p, p->device->id,
3463 (u8)p->device->lun);
3464 list_move_tail(&srb->list, &acb->srb_free_list);
3465 p->result = result;
3466 pci_unmap_srb_sense(acb, srb);
3467 pci_unmap_srb(acb, srb);
3468 			if (force) {
3469 				/* Complete the waiting command itself, not
3470 				 * the caller-supplied cmd (which may be NULL). */
3471 				p->scsi_done(p);
3472 			}
3473 }
3474 if (!list_empty(&dcb->srb_waiting_list))
3475 dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
3476 list_size(&dcb->srb_waiting_list), dcb->target_id,
3477 dcb->target_lun);
3478 dcb->flag &= ~ABORT_DEV_;
3479 }
3480 printk("\n");
3481 }
3482
3483
3484 static void reset_scsi_bus(struct AdapterCtlBlk *acb)
3485 {
3486 dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
3487 acb->acb_flag |= RESET_DEV;
3488 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
3489
3490 while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET))
3491 ;
3492 }
3493
3494
3495 static void set_basic_config(struct AdapterCtlBlk *acb)
3496 {
3497 u8 bval;
3498 u16 wval;
3499 DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout);
3500 if (acb->config & HCC_PARITY)
3501 bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
3502 else
3503 bval = PHASELATCH | INITIATOR | BLOCKRST;
3504
3505 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval);
3506
3507
3508 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03);
3509
3510 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
3511
3512 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00);
3513
3514 wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F;
3515 DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval);
3516
3517 wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL;
3518 wval |=
3519 DMA_FIFO_HALF_HALF | DMA_ENHANCE ;
3520 DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval);
3521
3522 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
3523
3524 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F);
3525 DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR
3526
3527 );
3528 }
3529
3530
3531 static void scsi_reset_detect(struct AdapterCtlBlk *acb)
3532 {
3533 dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
3534
3535 if (timer_pending(&acb->waiting_timer))
3536 del_timer(&acb->waiting_timer);
3537
3538 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
3539 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
3540
3541 udelay(500);
3542
3543 acb->last_reset =
3544 jiffies + 5 * HZ / 2 +
3545 HZ * acb->eeprom.delay_time;
3546
3547 clear_fifo(acb, "scsi_reset_detect");
3548 set_basic_config(acb);
3549
3550
3551
3552 if (acb->acb_flag & RESET_DEV) {
3553 acb->acb_flag |= RESET_DONE;
3554 } else {
3555 acb->acb_flag |= RESET_DETECT;
3556 reset_dev_param(acb);
3557 doing_srb_done(acb, DID_RESET, NULL, 1);
3558
3559 acb->active_dcb = NULL;
3560 acb->acb_flag = 0;
3561 waiting_process_next(acb);
3562 }
3563 }
3564
3565
3566 static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3567 struct ScsiReqBlk *srb)
3568 {
3569 struct scsi_cmnd *cmd = srb->cmd;
3570 dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
3571 cmd, cmd->device->id, (u8)cmd->device->lun);
3572
3573 srb->flag |= AUTO_REQSENSE;
3574 srb->adapter_status = 0;
3575 srb->target_status = 0;
3576
3577
3578 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3579
3580
3581 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
3582 srb->segment_x[0].address;
3583 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
3584 srb->segment_x[0].length;
3585 srb->xferred = srb->total_xfer_length;
3586
3587 srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
3588 srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
3589
3590 srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
3591 cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
3592 DMA_FROM_DEVICE);
3593 dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
3594 cmd->sense_buffer, srb->segment_x[0].address,
3595 SCSI_SENSE_BUFFERSIZE);
3596 srb->sg_count = 1;
3597 srb->sg_index = 0;
3598
3599 if (start_scsi(acb, dcb, srb)) {
3600 dprintkl(KERN_DEBUG,
3601 "request_sense: (0x%p) failed <%02i-%i>\n",
3602 srb->cmd, dcb->target_id, dcb->target_lun);
3603 list_move(&srb->list, &dcb->srb_waiting_list);
3604 waiting_set_timer(acb, HZ / 100);
3605 }
3606 }
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
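/*
 * device_alloc - create and initialise a DeviceCtlBlk.
 *
 * Per-target defaults (period index, dev_mode bits, wide/sync enables)
 * come from the EEPROM image.  For LUNs other than 0 the negotiated
 * sync/wide state is copied from the already-known DCB of the same
 * target so that no renegotiation is needed.
 */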
3622 static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
3623 u8 target, u8 lun)
3624 {
3625 struct NvRamType *eeprom = &acb->eeprom;
3626 u8 period_index = eeprom->target[target].period & 0x07;
3627 struct DeviceCtlBlk *dcb;
3628
3629 dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
3630 dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
3631 if (!dcb)
3632 return NULL;
3633 dcb->acb = NULL;
3634 INIT_LIST_HEAD(&dcb->srb_going_list);
3635 INIT_LIST_HEAD(&dcb->srb_waiting_list);
3636 dcb->active_srb = NULL;
3637 dcb->tag_mask = 0;
3638 dcb->max_command = 1;
3639 dcb->target_id = target;
3640 dcb->target_lun = lun;
3641 dcb->dev_mode = eeprom->target[target].cfg0;
3642 #ifndef DC395x_NO_DISCONNECT
3643 dcb->identify_msg =
3644 IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
3645 #else
3646 dcb->identify_msg = IDENTIFY(0, lun);
3647 #endif
3648 dcb->inquiry7 = 0;
3649 dcb->sync_mode = 0;
3650 dcb->min_nego_period = clock_period[period_index];
3651 dcb->sync_period = 0;
3652 dcb->sync_offset = 0;
3653 dcb->flag = 0;
3654
3655 #ifndef DC395x_NO_WIDE
3656 if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
3657 && (acb->config & HCC_WIDE_CARD))
3658 dcb->sync_mode |= WIDE_NEGO_ENABLE;
3659 #endif
3660 #ifndef DC395x_NO_SYNC
3661 if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
3662 if (!(lun) || current_sync_offset)
3663 dcb->sync_mode |= SYNC_NEGO_ENABLE;
3664 #endif
3665 if (dcb->target_lun != 0) {
3666
3667 struct DeviceCtlBlk *p;
3668 list_for_each_entry(p, &acb->dcb_list, list)
3669 if (p->target_id == dcb->target_id)
3670 break;
3671 dprintkdbg(DBG_1,
3672 "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
3673 dcb->target_id, dcb->target_lun,
3674 p->target_id, p->target_lun);
3675 dcb->sync_mode = p->sync_mode;
3676 dcb->sync_period = p->sync_period;
3677 dcb->min_nego_period = p->min_nego_period;
3678 dcb->sync_offset = p->sync_offset;
3679 dcb->inquiry7 = p->inquiry7;
3680 }
3681 return dcb;
3682 }
3683
3684
3685
3686
3687
3688
3689
3690
3691 static void adapter_add_device(struct AdapterCtlBlk *acb,
3692 struct DeviceCtlBlk *dcb)
3693 {
3694
3695 dcb->acb = acb;
3696
3697
3698 if (list_empty(&acb->dcb_list))
3699 acb->dcb_run_robin = dcb;
3700
3701
3702 list_add_tail(&dcb->list, &acb->dcb_list);
3703
3704
3705 acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
3706 acb->children[dcb->target_id][dcb->target_lun] = dcb;
3707 }
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719 static void adapter_remove_device(struct AdapterCtlBlk *acb,
3720 struct DeviceCtlBlk *dcb)
3721 {
3722 struct DeviceCtlBlk *i;
3723 struct DeviceCtlBlk *tmp;
3724 dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
3725 dcb->target_id, dcb->target_lun);
3726
3727
3728 if (acb->active_dcb == dcb)
3729 acb->active_dcb = NULL;
3730 if (acb->dcb_run_robin == dcb)
3731 acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);
3732
3733
3734 list_for_each_entry_safe(i, tmp, &acb->dcb_list, list)
3735 if (dcb == i) {
3736 list_del(&i->list);
3737 break;
3738 }
3739
3740
3741 acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
3742 acb->children[dcb->target_id][dcb->target_lun] = NULL;
3743 dcb->acb = NULL;
3744 }
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754 static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
3755 struct DeviceCtlBlk *dcb)
3756 {
3757 if (list_size(&dcb->srb_going_list) > 1) {
3758 dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
3759 "Won't remove because of %i active requests.\n",
3760 dcb->target_id, dcb->target_lun,
3761 list_size(&dcb->srb_going_list));
3762 return;
3763 }
3764 adapter_remove_device(acb, dcb);
3765 kfree(dcb);
3766 }
3767
3768
3769
3770
3771
3772
3773
3774
3775 static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
3776 {
3777 struct DeviceCtlBlk *dcb;
3778 struct DeviceCtlBlk *tmp;
3779 dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
3780 list_size(&acb->dcb_list));
3781
3782 list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
3783 adapter_remove_and_free_device(acb, dcb);
3784 }
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794 static int dc395x_slave_alloc(struct scsi_device *scsi_device)
3795 {
3796 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
3797 struct DeviceCtlBlk *dcb;
3798
3799 dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
3800 if (!dcb)
3801 return -ENOMEM;
3802 adapter_add_device(acb, dcb);
3803
3804 return 0;
3805 }
3806
3807
3808
3809
3810
3811
3812
3813
3814 static void dc395x_slave_destroy(struct scsi_device *scsi_device)
3815 {
3816 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
3817 struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
3818 if (dcb)
3819 adapter_remove_and_free_device(acb, dcb);
3820 }
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
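/*
 * TRM-S1040 serial EEPROM access.
 *
 * The 128-byte configuration EEPROM is bit-banged through the
 * TRM_S1040_GEN_NVRAM register: a 3-bit opcode and a 7-bit address are
 * clocked out MSB first on NVR_BITOUT with NVR_CLOCK, data comes back
 * on NVR_BITIN, and trms1040_wait_30us() uses the chip's general timer
 * to pace each half clock (roughly 30 us per step, judging by the name
 * and the timer value written below).
 */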
3832 static void trms1040_wait_30us(unsigned long io_port)
3833 {
3834
3835 outb(5, io_port + TRM_S1040_GEN_TIMER);
3836 while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT))
3837 ;
3838 }
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849 static void trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
3850 {
3851 int i;
3852 u8 send_data;
3853
3854
3855 for (i = 0; i < 3; i++, cmd <<= 1) {
3856 send_data = NVR_SELECT;
3857 if (cmd & 0x04)
3858 send_data |= NVR_BITOUT;
3859
3860 outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
3861 trms1040_wait_30us(io_port);
3862 outb((send_data | NVR_CLOCK),
3863 io_port + TRM_S1040_GEN_NVRAM);
3864 trms1040_wait_30us(io_port);
3865 }
3866
3867
3868 for (i = 0; i < 7; i++, addr <<= 1) {
3869 send_data = NVR_SELECT;
3870 if (addr & 0x40)
3871 send_data |= NVR_BITOUT;
3872
3873 outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
3874 trms1040_wait_30us(io_port);
3875 outb((send_data | NVR_CLOCK),
3876 io_port + TRM_S1040_GEN_NVRAM);
3877 trms1040_wait_30us(io_port);
3878 }
3879 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
3880 trms1040_wait_30us(io_port);
3881 }
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894 static void trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
3895 {
3896 int i;
3897 u8 send_data;
3898
3899
3900 trms1040_write_cmd(io_port, 0x05, addr);
3901
3902
3903 for (i = 0; i < 8; i++, byte <<= 1) {
3904 send_data = NVR_SELECT;
3905 if (byte & 0x80)
3906 send_data |= NVR_BITOUT;
3907
3908 outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
3909 trms1040_wait_30us(io_port);
3910 outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
3911 trms1040_wait_30us(io_port);
3912 }
3913 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
3914 trms1040_wait_30us(io_port);
3915
3916
3917 outb(0, io_port + TRM_S1040_GEN_NVRAM);
3918 trms1040_wait_30us(io_port);
3919
3920 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
3921 trms1040_wait_30us(io_port);
3922
3923
3924 while (1) {
3925 outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
3926 trms1040_wait_30us(io_port);
3927
3928 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
3929 trms1040_wait_30us(io_port);
3930
3931 if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN)
3932 break;
3933 }
3934
3935
3936 outb(0, io_port + TRM_S1040_GEN_NVRAM);
3937 }
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948 static void trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
3949 {
3950 u8 *b_eeprom = (u8 *)eeprom;
3951 u8 addr;
3952
3953
3954 outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
3955 io_port + TRM_S1040_GEN_CONTROL);
3956
3957
3958 trms1040_write_cmd(io_port, 0x04, 0xFF);
3959 outb(0, io_port + TRM_S1040_GEN_NVRAM);
3960 trms1040_wait_30us(io_port);
3961
3962
3963 for (addr = 0; addr < 128; addr++, b_eeprom++)
3964 trms1040_set_data(io_port, addr, *b_eeprom);
3965
3966
3967 trms1040_write_cmd(io_port, 0x04, 0x00);
3968 outb(0, io_port + TRM_S1040_GEN_NVRAM);
3969 trms1040_wait_30us(io_port);
3970
3971
3972 outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
3973 io_port + TRM_S1040_GEN_CONTROL);
3974 }
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988 static u8 trms1040_get_data(unsigned long io_port, u8 addr)
3989 {
3990 int i;
3991 u8 read_byte;
3992 u8 result = 0;
3993
3994
3995 trms1040_write_cmd(io_port, 0x06, addr);
3996
3997
3998 for (i = 0; i < 8; i++) {
3999 outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
4000 trms1040_wait_30us(io_port);
4001 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
4002
4003
4004 read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
4005 result <<= 1;
4006 if (read_byte & NVR_BITIN)
4007 result |= 1;
4008
4009 trms1040_wait_30us(io_port);
4010 }
4011
4012
4013 outb(0, io_port + TRM_S1040_GEN_NVRAM);
4014 return result;
4015 }
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026 static void trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
4027 {
4028 u8 *b_eeprom = (u8 *)eeprom;
4029 u8 addr;
4030
4031
4032 outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
4033 io_port + TRM_S1040_GEN_CONTROL);
4034
4035
4036 for (addr = 0; addr < 128; addr++, b_eeprom++)
4037 *b_eeprom = trms1040_get_data(io_port, addr);
4038
4039
4040 outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
4041 io_port + TRM_S1040_GEN_CONTROL);
4042 }
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
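/*
 * check_eeprom - validate (or rebuild) the EEPROM image.
 *
 * The image is valid when its 64 16-bit words sum to 0x1234.  On a
 * checksum failure a default configuration is generated, the final word
 * is set to 0x1234 minus the sum of the first 63 words so the total
 * comes out right again, and the image is written back to the device.
 *
 * A minimal standalone sketch of the same check, not referenced by the
 * driver (it assumes the 128-byte / 64-word layout of struct NvRamType):
 */
#if 0	/* illustration only */
static int nvram_checksum_ok(const struct NvRamType *eeprom)
{
	const u16 *w = (const u16 *)eeprom;
	u16 sum = 0;
	int i;

	for (i = 0; i < 64; i++)	/* 128 bytes == 64 words */
		sum += w[i];
	return sum == 0x1234;
}
#endif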
4056 static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
4057 {
4058 u16 *w_eeprom = (u16 *)eeprom;
4059 u16 w_addr;
4060 u16 cksum;
4061 u32 d_addr;
4062 u32 *d_eeprom;
4063
4064 trms1040_read_all(eeprom, io_port);
4065
4066 cksum = 0;
4067 for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
4068 w_addr++, w_eeprom++)
4069 cksum += *w_eeprom;
4070 if (cksum != 0x1234) {
4071
4072
4073
4074
4075 dprintkl(KERN_WARNING,
4076 "EEProm checksum error: using default values and options.\n");
4077 eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
4078 eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
4079 eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
4080 eeprom->sub_sys_id[1] =
4081 (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
4082 eeprom->sub_class = 0x00;
4083 eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
4084 eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
4085 eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
4086 eeprom->device_id[1] =
4087 (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
4088 eeprom->reserved = 0x00;
4089
4090 for (d_addr = 0, d_eeprom = (u32 *)eeprom->target;
4091 d_addr < 16; d_addr++, d_eeprom++)
4092 *d_eeprom = 0x00000077;
4093
4094 *d_eeprom++ = 0x04000F07;
4095 *d_eeprom++ = 0x00000015;
4096 for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++)
4097 *d_eeprom = 0x00;
4098
4099
4100 set_safe_settings();
4101 fix_settings();
4102 eeprom_override(eeprom);
4103
4104 eeprom->cksum = 0x00;
4105 for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom;
4106 w_addr < 63; w_addr++, w_eeprom++)
4107 cksum += *w_eeprom;
4108
4109 *w_eeprom = 0x1234 - cksum;
4110 trms1040_write_all(eeprom, io_port);
4111 eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value;
4112 } else {
4113 set_safe_settings();
4114 eeprom_index_to_delay(eeprom);
4115 eeprom_override(eeprom);
4116 }
4117 }
4118
4119
4120
4121
4122
4123
4124
4125
4126 static void print_eeprom_settings(struct NvRamType *eeprom)
4127 {
4128 dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
4129 eeprom->scsi_id,
4130 eeprom->target[0].period,
4131 clock_speed[eeprom->target[0].period] / 10,
4132 clock_speed[eeprom->target[0].period] % 10,
4133 eeprom->target[0].cfg0);
4134 dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
4135 eeprom->channel_cfg, eeprom->max_tag,
4136 1 << eeprom->max_tag, eeprom->delay_time);
4137 }
4138
4139
4140
4141 static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
4142 {
4143 int i;
4144 const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
4145
4146 for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
4147 kfree(acb->srb_array[i].segment_x);
4148 }
4149
4150
4151
4152
4153
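/*
 * adapter_sg_tables_alloc - carve per-SRB SG tables out of whole pages.
 *
 * Each SRB needs SEGMENTX_LEN bytes of SGentry descriptors, so
 * PAGE_SIZE / SEGMENTX_LEN SRBs share one kmalloc'd page; enough pages
 * are allocated for DC395x_MAX_SRB_CNT + 1 tables, the spare slot being
 * reserved for the adapter's tmp_srb.  As an example only (the real
 * figures depend on DC395x_MAX_SG_LISTENTRY), a 512-byte SEGMENTX_LEN
 * with 4 KiB pages would give eight tables per page.
 */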
4154 static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
4155 {
4156 const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
4157 *SEGMENTX_LEN;
4158 int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;
4159 const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
4160 int srb_idx = 0;
4161 unsigned i = 0;
4162 struct SGentry *uninitialized_var(ptr);
4163
4164 for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
4165 acb->srb_array[i].segment_x = NULL;
4166
4167 dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
4168 while (pages--) {
4169 ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
4170 if (!ptr) {
4171 adapter_sg_tables_free(acb);
4172 return 1;
4173 }
4174 dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
4175 PAGE_SIZE, ptr, srb_idx);
4176 i = 0;
4177 while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
4178 acb->srb_array[srb_idx++].segment_x =
4179 ptr + (i++ * DC395x_MAX_SG_LISTENTRY);
4180 }
4181 if (i < srbs_per_page)
4182 acb->srb.segment_x =
4183 ptr + (i * DC395x_MAX_SG_LISTENTRY);
4184 else
4185 		dprintkl(KERN_DEBUG, "No space for tmp_srb SG table reserved?!\n");
4186 return 0;
4187 }
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200 static void adapter_print_config(struct AdapterCtlBlk *acb)
4201 {
4202 u8 bval;
4203
4204 bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
4205 dprintkl(KERN_INFO, "%sConnectors: ",
4206 ((bval & WIDESCSI) ? "(Wide) " : ""));
4207 if (!(bval & CON5068))
4208 printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
4209 if (!(bval & CON68))
4210 printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)");
4211 if (!(bval & CON50))
4212 printk("int50 ");
4213 if ((bval & (CON5068 | CON50 | CON68)) ==
4214 0 )
4215 printk(" Oops! (All 3?) ");
4216 bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL);
4217 printk(" Termination: ");
4218 if (bval & DIS_TERM)
4219 printk("Disabled\n");
4220 else {
4221 if (bval & AUTOTERM)
4222 printk("Auto ");
4223 if (bval & LOW8TERM)
4224 printk("Low ");
4225 if (bval & UP8TERM)
4226 printk("High ");
4227 printk("\n");
4228 }
4229 }
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244 static void adapter_init_params(struct AdapterCtlBlk *acb)
4245 {
4246 struct NvRamType *eeprom = &acb->eeprom;
4247 int i;
4248
4249
4250
4251
4252
4253 INIT_LIST_HEAD(&acb->dcb_list);
4254 acb->dcb_run_robin = NULL;
4255 acb->active_dcb = NULL;
4256
4257 INIT_LIST_HEAD(&acb->srb_free_list);
4258
4259 acb->tmp_srb = &acb->srb;
4260 timer_setup(&acb->waiting_timer, waiting_timeout, 0);
4261 timer_setup(&acb->selto_timer, NULL, 0);
4262
4263 acb->srb_count = DC395x_MAX_SRB_CNT;
4264
4265 acb->sel_timeout = DC395x_SEL_TIMEOUT;
4266
4267
4268 acb->tag_max_num = 1 << eeprom->max_tag;
4269 if (acb->tag_max_num > 30)
4270 acb->tag_max_num = 30;
4271
4272 acb->acb_flag = 0;
4273 acb->gmode2 = eeprom->channel_cfg;
4274 acb->config = 0;
4275
4276 if (eeprom->channel_cfg & NAC_SCANLUN)
4277 acb->lun_chk = 1;
4278 acb->scan_devices = 1;
4279
4280 acb->scsi_host->this_id = eeprom->scsi_id;
4281 acb->hostid_bit = (1 << acb->scsi_host->this_id);
4282
4283 for (i = 0; i < DC395x_MAX_SCSI_ID; i++)
4284 acb->dcb_map[i] = 0;
4285
4286 acb->msg_len = 0;
4287
4288
4289 for (i = 0; i < acb->srb_count - 1; i++)
4290 list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list);
4291 }
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306 static void adapter_init_scsi_host(struct Scsi_Host *host)
4307 {
4308 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
4309 struct NvRamType *eeprom = &acb->eeprom;
4310
4311 host->max_cmd_len = 24;
4312 host->can_queue = DC395x_MAX_CMD_QUEUE;
4313 host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN;
4314 host->this_id = (int)eeprom->scsi_id;
4315 host->io_port = acb->io_port_base;
4316 host->n_io_port = acb->io_port_len;
4317 host->dma_channel = -1;
4318 host->unique_id = acb->io_port_base;
4319 host->irq = acb->irq_level;
4320 acb->last_reset = jiffies;
4321
4322 host->max_id = 16;
4323 if (host->max_id - 1 == eeprom->scsi_id)
4324 host->max_id--;
4325
4326 if (eeprom->channel_cfg & NAC_SCANLUN)
4327 host->max_lun = 8;
4328 else
4329 host->max_lun = 1;
4330 }
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
4342 static void adapter_init_chip(struct AdapterCtlBlk *acb)
4343 {
4344 struct NvRamType *eeprom = &acb->eeprom;
4345
4346
4347 DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
4348 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
4349
4350
4351 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
4352
4353
4354 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
4355 udelay(20);
4356
4357
4358 acb->config = HCC_AUTOTERM | HCC_PARITY;
4359 if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI)
4360 acb->config |= HCC_WIDE_CARD;
4361
4362 if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET)
4363 acb->config |= HCC_SCSI_RESET;
4364
4365 if (acb->config & HCC_SCSI_RESET) {
4366 dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
4367 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
4368
4369
4370
4371 udelay(500);
4372
4373 acb->last_reset =
4374 jiffies + HZ / 2 +
4375 HZ * acb->eeprom.delay_time;
4376
4377
4378 }
4379 }
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
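/*
 * adapter_init - bring the HBA up in one place.
 *
 * The sequence below is: reserve the I/O region, register the (shared)
 * IRQ, read and sanity-check the EEPROM, initialise the software state,
 * allocate the SG tables, fill in the Scsi_Host fields and finally
 * program the chip.  On any failure everything acquired so far is
 * released and 1 is returned.
 */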
4395 static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
4396 u32 io_port_len, unsigned int irq)
4397 {
4398 if (!request_region(io_port, io_port_len, DC395X_NAME)) {
4399 dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
4400 goto failed;
4401 }
4402
4403 acb->io_port_base = io_port;
4404 acb->io_port_len = io_port_len;
4405
4406 if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
4407
4408 dprintkl(KERN_INFO, "Failed to register IRQ\n");
4409 goto failed;
4410 }
4411
4412 acb->irq_level = irq;
4413
4414
4415 check_eeprom(&acb->eeprom, io_port);
4416 print_eeprom_settings(&acb->eeprom);
4417
4418
4419 adapter_init_params(acb);
4420
4421
4422 adapter_print_config(acb);
4423
4424 if (adapter_sg_tables_alloc(acb)) {
4425 dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
4426 goto failed;
4427 }
4428 adapter_init_scsi_host(acb->scsi_host);
4429 adapter_init_chip(acb);
4430 set_basic_config(acb);
4431
4432 dprintkdbg(DBG_0,
4433 "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
4434 "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
4435 acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
4436 sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
4437 return 0;
4438
4439 failed:
4440 if (acb->irq_level)
4441 free_irq(acb->irq_level, acb);
4442 if (acb->io_port_base)
4443 release_region(acb->io_port_base, acb->io_port_len);
4444 adapter_sg_tables_free(acb);
4445
4446 return 1;
4447 }
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457 static void adapter_uninit_chip(struct AdapterCtlBlk *acb)
4458 {
4459
4460 DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0);
4461 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0);
4462
4463
4464 if (acb->config & HCC_SCSI_RESET)
4465 reset_scsi_bus(acb);
4466
4467
4468 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
4469 }
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480 static void adapter_uninit(struct AdapterCtlBlk *acb)
4481 {
4482 unsigned long flags;
4483 DC395x_LOCK_IO(acb->scsi_host, flags);
4484
4485
4486 if (timer_pending(&acb->waiting_timer))
4487 del_timer(&acb->waiting_timer);
4488 if (timer_pending(&acb->selto_timer))
4489 del_timer(&acb->selto_timer);
4490
4491 adapter_uninit_chip(acb);
4492 adapter_remove_and_free_all_devices(acb);
4493 DC395x_UNLOCK_IO(acb->scsi_host, flags);
4494
4495 if (acb->irq_level)
4496 free_irq(acb->irq_level, acb);
4497 if (acb->io_port_base)
4498 release_region(acb->io_port_base, acb->io_port_len);
4499
4500 adapter_sg_tables_free(acb);
4501 }
4502
4503
4504 #undef YESNO
4505 #define YESNO(YN) \
4506 if (YN) seq_printf(m, " Yes ");\
4507 else seq_printf(m, " No ")
4508
4509 static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
4510 {
4511 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
4512 int spd, spd1;
4513 struct DeviceCtlBlk *dcb;
4514 unsigned long flags;
4515 int dev;
4516
4517 seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n"
4518 " Driver Version " DC395X_VERSION "\n");
4519
4520 DC395x_LOCK_IO(acb->scsi_host, flags);
4521
4522 seq_printf(m, "SCSI Host Nr %i, ", host->host_no);
4523 seq_printf(m, "DC395U/UW/F DC315/U %s\n",
4524 (acb->config & HCC_WIDE_CARD) ? "Wide" : "");
4525 seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base);
4526 seq_printf(m, "irq_level 0x%04x, ", acb->irq_level);
4527 seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);
4528
4529 seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
4530 seq_printf(m, "AdapterID %i\n", host->this_id);
4531
4532 seq_printf(m, "tag_max_num %i", acb->tag_max_num);
4533
4534 seq_printf(m, ", FilterCfg 0x%02x",
4535 DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
4536 seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time);
4537
4538
4539 seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
4540 seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
4541 acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
4542 acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
4543 acb->dcb_map[6], acb->dcb_map[7]);
4544 seq_printf(m, " %02x %02x %02x %02x %02x %02x %02x %02x\n",
4545 acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
4546 acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
4547 acb->dcb_map[14], acb->dcb_map[15]);
4548
4549 seq_puts(m,
4550 "Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");
4551
4552 dev = 0;
4553 list_for_each_entry(dcb, &acb->dcb_list, list) {
4554 int nego_period;
4555 seq_printf(m, "%02i %02i %02i ", dev, dcb->target_id,
4556 dcb->target_lun);
4557 YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
4558 YESNO(dcb->sync_offset);
4559 YESNO(dcb->sync_period & WIDE_SYNC);
4560 YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
4561 YESNO(dcb->dev_mode & NTC_DO_SEND_START);
4562 YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
4563 nego_period = clock_period[dcb->sync_period & 0x07] << 2;
4564 if (dcb->sync_offset)
4565 seq_printf(m, " %03i ns ", nego_period);
4566 else
4567 seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2));
4568
4569 if (dcb->sync_offset & 0x0f) {
4570 spd = 1000 / (nego_period);
4571 spd1 = 1000 % (nego_period);
4572 spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
4573 seq_printf(m, " %2i.%1i M %02i ", spd, spd1,
4574 (dcb->sync_offset & 0x0f));
4575 } else
4576 seq_puts(m, " ");
4577
4578
4579 seq_printf(m, " %02i\n", dcb->max_command);
4580 dev++;
4581 }
4582
4583 if (timer_pending(&acb->waiting_timer))
4584 seq_puts(m, "Waiting queue timer running\n");
4585 else
4586 seq_putc(m, '\n');
4587
4588 list_for_each_entry(dcb, &acb->dcb_list, list) {
4589 struct ScsiReqBlk *srb;
4590 if (!list_empty(&dcb->srb_waiting_list))
4591 seq_printf(m, "DCB (%02i-%i): Waiting: %i:",
4592 dcb->target_id, dcb->target_lun,
4593 list_size(&dcb->srb_waiting_list));
4594 list_for_each_entry(srb, &dcb->srb_waiting_list, list)
4595 seq_printf(m, " %p", srb->cmd);
4596 if (!list_empty(&dcb->srb_going_list))
4597 seq_printf(m, "\nDCB (%02i-%i): Going : %i:",
4598 dcb->target_id, dcb->target_lun,
4599 list_size(&dcb->srb_going_list));
4600 list_for_each_entry(srb, &dcb->srb_going_list, list)
4601 seq_printf(m, " %p", srb->cmd);
4602 if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
4603 seq_putc(m, '\n');
4604 }
4605
4606 if (debug_enabled(DBG_1)) {
4607 seq_printf(m, "DCB list for ACB %p:\n", acb);
4608 list_for_each_entry(dcb, &acb->dcb_list, list) {
4609 seq_printf(m, "%p -> ", dcb);
4610 }
4611 seq_puts(m, "END\n");
4612 }
4613
4614 DC395x_UNLOCK_IO(acb->scsi_host, flags);
4615 return 0;
4616 }
4617
4618
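/* Host template describing this driver to the SCSI mid-layer. */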
4619 static struct scsi_host_template dc395x_driver_template = {
4620 .module = THIS_MODULE,
4621 .proc_name = DC395X_NAME,
4622 .show_info = dc395x_show_info,
4623 .name = DC395X_BANNER " " DC395X_VERSION,
4624 .queuecommand = dc395x_queue_command,
4625 .bios_param = dc395x_bios_param,
4626 .slave_alloc = dc395x_slave_alloc,
4627 .slave_destroy = dc395x_slave_destroy,
4628 .can_queue = DC395x_MAX_CAN_QUEUE,
4629 .this_id = 7,
4630 .sg_tablesize = DC395x_MAX_SG_TABLESIZE,
4631 .cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
4632 .eh_abort_handler = dc395x_eh_abort,
4633 .eh_bus_reset_handler = dc395x_eh_bus_reset,
4634 .dma_boundary = PAGE_SIZE - 1,
4635 };
4636
4637
4638
4639
4640
4641
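/* banner_display - print the driver banner once, on the first probe. */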
4642 static void banner_display(void)
4643 {
4644 static int banner_done = 0;
4645 if (!banner_done)
4646 {
4647 dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
4648 banner_done = 1;
4649 }
4650 }
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
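/*
 * dc395x_init_one - PCI probe callback: enable the device, allocate a
 * Scsi_Host with the AdapterCtlBlk as hostdata, initialise the adapter
 * and register the host with the SCSI mid-layer.
 */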
4666 static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
4667 {
4668 struct Scsi_Host *scsi_host = NULL;
4669 struct AdapterCtlBlk *acb = NULL;
4670 unsigned long io_port_base;
4671 unsigned int io_port_len;
4672 unsigned int irq;
4673
4674 dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
4675 banner_display();
4676
4677 if (pci_enable_device(dev))
4678 {
4679 dprintkl(KERN_INFO, "PCI Enable device failed.\n");
4680 return -ENODEV;
4681 }
4682 io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
4683 io_port_len = pci_resource_len(dev, 0);
4684 irq = dev->irq;
4685 dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);
4686
4687
4688 scsi_host = scsi_host_alloc(&dc395x_driver_template,
4689 sizeof(struct AdapterCtlBlk));
4690 if (!scsi_host) {
4691 dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
4692 goto fail;
4693 }
4694 acb = (struct AdapterCtlBlk*)scsi_host->hostdata;
4695 acb->scsi_host = scsi_host;
4696 acb->dev = dev;
4697
4698
4699 if (adapter_init(acb, io_port_base, io_port_len, irq)) {
4700 dprintkl(KERN_INFO, "adapter init failed\n");
/* adapter_init() has already released everything it acquired;
 * clear acb so the fail path does not run adapter_uninit() and
 * free the IRQ and I/O region a second time. */
acb = NULL;
4701 goto fail;
4702 }
4703
4704 pci_set_master(dev);
4705
4706
4707 if (scsi_add_host(scsi_host, &dev->dev)) {
4708 dprintkl(KERN_ERR, "scsi_add_host failed\n");
4709 goto fail;
4710 }
4711 pci_set_drvdata(dev, scsi_host);
4712 scsi_scan_host(scsi_host);
4713
4714 return 0;
4715
4716 fail:
4717 if (acb != NULL)
4718 adapter_uninit(acb);
4719 if (scsi_host != NULL)
4720 scsi_host_put(scsi_host);
4721 pci_disable_device(dev);
4722 return -ENODEV;
4723 }
4724
4725
4726
4727
4728
4729
4730
4731
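/*
 * dc395x_remove_one - PCI remove callback: detach the host from the
 * SCSI mid-layer, shut the adapter down and disable the PCI device.
 */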
4732 static void dc395x_remove_one(struct pci_dev *dev)
4733 {
4734 struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
4735 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);
4736
4737 dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);
4738
4739 scsi_remove_host(scsi_host);
4740 adapter_uninit(acb);
4741 pci_disable_device(dev);
4742 scsi_host_put(scsi_host);
4743 }
4744
4745
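/* PCI devices supported by this driver: any TRM-S1040 based board. */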
4746 static struct pci_device_id dc395x_pci_table[] = {
4747 {
4748 .vendor = PCI_VENDOR_ID_TEKRAM,
4749 .device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
4750 .subvendor = PCI_ANY_ID,
4751 .subdevice = PCI_ANY_ID,
4752 },
4753 {}
4754 };
4755 MODULE_DEVICE_TABLE(pci, dc395x_pci_table);
4756
4757
4758 static struct pci_driver dc395x_driver = {
4759 .name = DC395X_NAME,
4760 .id_table = dc395x_pci_table,
4761 .probe = dc395x_init_one,
4762 .remove = dc395x_remove_one,
4763 };
4764
4765
4766
4767
4768
4769
4770
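/* dc395x_module_init - register this driver with the PCI subsystem. */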
4771 static int __init dc395x_module_init(void)
4772 {
4773 return pci_register_driver(&dc395x_driver);
4774 }
4775
4776
4777
4778
4779
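/* dc395x_module_exit - unregister this driver from the PCI subsystem. */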
4780 static void __exit dc395x_module_exit(void)
4781 {
4782 pci_unregister_driver(&dc395x_driver);
4783 }
4784
4785
4786 module_init(dc395x_module_init);
4787 module_exit(dc395x_module_exit);
4788
4789 MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
4790 MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
4791 MODULE_LICENSE("GPL");