This source file includes the following definitions:
- qla1280_setup
- qla1280_read_nvram
- qla1280_info
- qla1280_queuecommand_lck
- DEF_SCSI_QCMD
- qla1280_mailbox_timeout
- _qla1280_wait_for_single_command
- qla1280_wait_for_single_command
- qla1280_wait_for_pending_commands
- qla1280_error_action
- qla1280_eh_abort
- qla1280_eh_device_reset
- qla1280_eh_bus_reset
- qla1280_eh_adapter_reset
- qla1280_biosparam
- qla1280_disable_intrs
- qla1280_enable_intrs
- qla1280_intr_handler
- qla1280_set_target_parameters
- qla1280_slave_configure
- qla1280_done
- qla1280_return_status
- qla1280_initialize_adapter
- qla1280_request_firmware
- qla1280_chip_diag
- qla1280_load_firmware_pio
- qla1280_load_firmware_dma
- qla1280_start_firmware
- qla1280_load_firmware
- qla1280_init_rings
- qla1280_print_settings
- qla1280_set_target_defaults
- qla1280_set_defaults
- qla1280_config_target
- qla1280_config_bus
- qla1280_nvram_config
- qla1280_get_nvram_word
- qla1280_nvram_request
- qla1280_nv_write
- qla1280_mailbox_command
- qla1280_poll
- qla1280_bus_reset
- qla1280_device_reset
- qla1280_abort_command
- qla1280_reset_adapter
- qla1280_marker
- qla1280_64bit_start_scsi
- qla1280_32bit_start_scsi
- qla1280_req_pkt
- qla1280_isp_cmd
- qla1280_isr
- qla1280_rst_aen
- qla1280_status_entry
- qla1280_error_entry
- qla1280_abort_isp
- qla1280_debounce_register
- qla1280_check_for_dead_scsi_bus
- qla1280_get_target_parameters
- __qla1280_dump_buffer
- __qla1280_print_scsi_cmd
- ql1280_dump_device
- qla1280_setup
- qla1280_get_token
- qla1280_probe_one
- qla1280_remove_one
- qla1280_init
- qla1280_exit
11 #define QLA1280_VERSION "3.27.1"
330 #include <linux/module.h>
331
332 #include <linux/types.h>
333 #include <linux/string.h>
334 #include <linux/errno.h>
335 #include <linux/kernel.h>
336 #include <linux/ioport.h>
337 #include <linux/delay.h>
338 #include <linux/timer.h>
339 #include <linux/pci.h>
340 #include <linux/proc_fs.h>
341 #include <linux/stat.h>
342 #include <linux/pci_ids.h>
343 #include <linux/interrupt.h>
344 #include <linux/init.h>
345 #include <linux/dma-mapping.h>
346 #include <linux/firmware.h>
347
348 #include <asm/io.h>
349 #include <asm/irq.h>
350 #include <asm/byteorder.h>
351 #include <asm/processor.h>
352 #include <asm/types.h>
353
354 #include <scsi/scsi.h>
355 #include <scsi/scsi_cmnd.h>
356 #include <scsi/scsi_device.h>
357 #include <scsi/scsi_host.h>
358 #include <scsi/scsi_tcq.h>
365 #define DEBUG_QLA1280_INTR 0
366 #define DEBUG_PRINT_NVRAM 0
367 #define DEBUG_QLA1280 0
368
369 #define MEMORY_MAPPED_IO 1
370
371 #include "qla1280.h"
372
373 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
374 #define QLA_64BIT_PTR 1
375 #endif
376
377 #define NVRAM_DELAY() udelay(500)
378
379 #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
380 #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
381 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
382 #define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
383 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
384
385
386 static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
387 static void qla1280_remove_one(struct pci_dev *);
392 static void qla1280_done(struct scsi_qla_host *);
393 static int qla1280_get_token(char *);
394 static int qla1280_setup(char *s) __init;
395
396
397
398
399 static int qla1280_load_firmware(struct scsi_qla_host *);
400 static int qla1280_init_rings(struct scsi_qla_host *);
401 static int qla1280_nvram_config(struct scsi_qla_host *);
402 static int qla1280_mailbox_command(struct scsi_qla_host *,
403 uint8_t, uint16_t *);
404 static int qla1280_bus_reset(struct scsi_qla_host *, int);
405 static int qla1280_device_reset(struct scsi_qla_host *, int, int);
406 static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
407 static int qla1280_abort_isp(struct scsi_qla_host *);
408 #ifdef QLA_64BIT_PTR
409 static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
410 #else
411 static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
412 #endif
413 static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
414 static void qla1280_poll(struct scsi_qla_host *);
415 static void qla1280_reset_adapter(struct scsi_qla_host *);
416 static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
417 static void qla1280_isp_cmd(struct scsi_qla_host *);
418 static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
419 static void qla1280_rst_aen(struct scsi_qla_host *);
420 static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
421 struct list_head *);
422 static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
423 struct list_head *);
424 static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
425 static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
426 static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
427 static request_t *qla1280_req_pkt(struct scsi_qla_host *);
428 static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
429 unsigned int);
430 static void qla1280_get_target_parameters(struct scsi_qla_host *,
431 struct scsi_device *);
432 static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
433
434
435 static struct qla_driver_setup driver_setup;
436
437
438
439
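/*
 * Translate the midlayer DMA direction into the ISP control flag bits:
 * BIT_5 for data in from the device, BIT_6 for data out to the device.
 */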
440 static inline uint16_t
441 qla1280_data_direction(struct scsi_cmnd *cmnd)
442 {
443 switch(cmnd->sc_data_direction) {
444 case DMA_FROM_DEVICE:
445 return BIT_5;
446 case DMA_TO_DEVICE:
447 return BIT_6;
448 case DMA_BIDIRECTIONAL:
449 return BIT_5 | BIT_6;
455 case DMA_NONE:
456 default:
457 return 0;
458 }
459 }
460
461 #if DEBUG_QLA1280
462 static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
463 static void __qla1280_dump_buffer(char *, int);
464 #endif
465
470 #ifdef MODULE
471 static char *qla1280;
472
473
474 module_param(qla1280, charp, 0);
475 #else
476 __setup("qla1280=", qla1280_setup);
477 #endif
/* Accessor macros for the scsi_cmnd fields used throughout the driver. */
486 #define CMD_SP(Cmnd) &Cmnd->SCp
487 #define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
488 #define CMD_CDBP(Cmnd) Cmnd->cmnd
489 #define CMD_SNSP(Cmnd) Cmnd->sense_buffer
490 #define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
491 #define CMD_RESULT(Cmnd) Cmnd->result
492 #define CMD_HANDLE(Cmnd) Cmnd->host_scribble
493 #define CMD_REQUEST(Cmnd) Cmnd->request->cmd
494
495 #define CMD_HOST(Cmnd) Cmnd->device->host
496 #define SCSI_BUS_32(Cmnd) Cmnd->device->channel
497 #define SCSI_TCN_32(Cmnd) Cmnd->device->id
498 #define SCSI_LUN_32(Cmnd) Cmnd->device->lun
/* Board description table and the PCI device IDs this driver binds to. */
505 struct qla_boards {
506 char *name;
507 int numPorts;
508 int fw_index;
509 };
510
511
512 static struct pci_device_id qla1280_pci_tbl[] = {
513 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
514 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
515 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
516 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
517 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
518 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
519 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
520 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
521 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
522 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
523 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
524 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
525 {0,}
526 };
527 MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
528
529 DEFINE_MUTEX(qla1280_firmware_mutex);
530
531 struct qla_fw {
532 char *fwname;
533 const struct firmware *fw;
534 };
535
536 #define QL_NUM_FW_IMAGES 3
537
538 struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
539 {"qlogic/1040.bin", NULL},
540 {"qlogic/1280.bin", NULL},
541 {"qlogic/12160.bin", NULL},
542 };
543
544
545 static struct qla_boards ql1280_board_tbl[] = {
546 {.name = "QLA12160", .numPorts = 2, .fw_index = 2},
547 {.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
548 {.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
549 {.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
550 {.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
551 {.name = "QLA10160", .numPorts = 1, .fw_index = 2},
552 {.name = " ", .numPorts = 0, .fw_index = -1},
553 };
554
555 static int qla1280_verbose = 1;
556
557 #if DEBUG_QLA1280
558 static int ql_debug_level = 1;
559 #define dprintk(level, format, a...) \
560 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
561 #define qla1280_dump_buffer(level, buf, size) \
562 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
563 #define qla1280_print_scsi_cmd(level, cmd) \
564 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
565 #else
566 #define ql_debug_level 0
567 #define dprintk(level, format, a...) do{}while(0)
568 #define qla1280_dump_buffer(a, b, c) do{}while(0)
569 #define qla1280_print_scsi_cmd(a, b) do{}while(0)
570 #endif
571
572 #define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
573 #define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
574 #define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
575 #define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
576
577
578 static int qla1280_read_nvram(struct scsi_qla_host *ha)
579 {
580 uint16_t *wptr;
581 uint8_t chksum;
582 int cnt, i;
583 struct nvram *nv;
584
585 ENTER("qla1280_read_nvram");
586
587 if (driver_setup.no_nvram)
588 return 1;
589
590 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
591
592 wptr = (uint16_t *)&ha->nvram;
593 nv = &ha->nvram;
594 chksum = 0;
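/* Read the first three words so the "ISP " signature and version can be checked before pulling in the rest of the NVRAM. */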
595 for (cnt = 0; cnt < 3; cnt++) {
596 *wptr = qla1280_get_nvram_word(ha, cnt);
597 chksum += *wptr & 0xff;
598 chksum += (*wptr >> 8) & 0xff;
599 wptr++;
600 }
601
602 if (nv->id0 != 'I' || nv->id1 != 'S' ||
603 nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
604 dprintk(2, "Invalid nvram ID or version!\n");
605 chksum = 1;
606 } else {
607 for (; cnt < sizeof(struct nvram); cnt++) {
608 *wptr = qla1280_get_nvram_word(ha, cnt);
609 chksum += *wptr & 0xff;
610 chksum += (*wptr >> 8) & 0xff;
611 wptr++;
612 }
613 }
614
615 dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
616 " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
617 nv->version);
618
619
620 if (chksum) {
621 if (!driver_setup.no_nvram)
622 printk(KERN_WARNING "scsi(%ld): Unable to identify or "
623 "validate NVRAM checksum, using default "
624 "settings\n", ha->host_no);
625 ha->nvram_valid = 0;
626 } else
627 ha->nvram_valid = 1;
628
/* Convert the multi-byte NVRAM fields to little-endian byte order. */
645 nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
646 nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
647 for(i = 0; i < MAX_BUSES; i++) {
648 nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
649 nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
650 }
651 dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
652 LEAVE("qla1280_read_nvram");
653
654 return chksum;
655 }
656
/*
 * qla1280_info
 *      Return a descriptive string for the adapter, including the board
 *      name and the firmware and driver versions.
 */
661 static const char *
662 qla1280_info(struct Scsi_Host *host)
663 {
664 static char qla1280_scsi_name_buffer[125];
665 char *bp;
666 struct scsi_qla_host *ha;
667 struct qla_boards *bdp;
668
669 bp = &qla1280_scsi_name_buffer[0];
670 ha = (struct scsi_qla_host *)host->hostdata;
671 bdp = &ql1280_board_tbl[ha->devnum];
672 memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
673
674 sprintf (bp,
675 "QLogic %s PCI to SCSI Host Adapter\n"
676 " Firmware version: %2d.%02d.%02d, Driver version %s",
677 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
678 QLA1280_VERSION);
679 return bp;
680 }
681
/*
 * qla1280_queuecommand
 *      Queue a command to the controller.  Called with the host lock held
 *      (via DEF_SCSI_QCMD); the command is handed straight to the request
 *      ring by the 32-bit or 64-bit start_scsi routine.
 */
693 static int
694 qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
695 {
696 struct Scsi_Host *host = cmd->device->host;
697 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
698 struct srb *sp = (struct srb *)CMD_SP(cmd);
699 int status;
700
701 cmd->scsi_done = fn;
702 sp->cmd = cmd;
703 sp->flags = 0;
704 sp->wait = NULL;
705 CMD_HANDLE(cmd) = (unsigned char *)NULL;
706
707 qla1280_print_scsi_cmd(5, cmd);
708
709 #ifdef QLA_64BIT_PTR
/*
 * On platforms with 64-bit DMA addressing, use the 64-bit request entry
 * format so buffers above 4GB can be mapped.
 */
716 status = qla1280_64bit_start_scsi(ha, sp);
717 #else
718 status = qla1280_32bit_start_scsi(ha, sp);
719 #endif
720 return status;
721 }
722
723 static DEF_SCSI_QCMD(qla1280_queuecommand)
724
725 enum action {
726 ABORT_COMMAND,
727 DEVICE_RESET,
728 BUS_RESET,
729 ADAPTER_RESET,
730 };
731
732
733 static void qla1280_mailbox_timeout(struct timer_list *t)
734 {
735 struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer);
736 struct device_reg __iomem *reg;
737 reg = ha->iobase;
738
739 ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
740 printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
741 "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
742 RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
743 complete(ha->mailbox_wait);
744 }
745
746 static int
747 _qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
748 struct completion *wait)
749 {
750 int status = FAILED;
751 struct scsi_cmnd *cmd = sp->cmd;
752
753 spin_unlock_irq(ha->host->host_lock);
754 wait_for_completion_timeout(wait, 4*HZ);
755 spin_lock_irq(ha->host->host_lock);
756 sp->wait = NULL;
757 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
758 status = SUCCESS;
759 (*cmd->scsi_done)(cmd);
760 }
761 return status;
762 }
763
764 static int
765 qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
766 {
767 DECLARE_COMPLETION_ONSTACK(wait);
768
769 sp->wait = &wait;
770 return _qla1280_wait_for_single_command(ha, sp, &wait);
771 }
772
773 static int
774 qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
775 {
776 int cnt;
777 int status;
778 struct srb *sp;
779 struct scsi_cmnd *cmd;
780
781 status = SUCCESS;
782
/*
 * Walk the outstanding command array and wait for each command that
 * matches the requested bus/target to complete.
 */
787 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
788 sp = ha->outstanding_cmds[cnt];
789 if (sp) {
790 cmd = sp->cmd;
791
792 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
793 continue;
794 if (target >= 0 && SCSI_TCN_32(cmd) != target)
795 continue;
796
797 status = qla1280_wait_for_single_command(ha, sp);
798 if (status == FAILED)
799 break;
800 }
801 }
802 return status;
803 }
804
/*
 * qla1280_error_action
 *      Common body for the SCSI error handlers: abort a single command,
 *      reset a device, reset a bus or reset the whole adapter, then wait
 *      for the affected commands to complete.
 *
 * Returns SUCCESS or FAILED.
 */
819 static int
820 qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
821 {
822 struct scsi_qla_host *ha;
823 int bus, target, lun;
824 struct srb *sp;
825 int i, found;
826 int result=FAILED;
827 int wait_for_bus=-1;
828 int wait_for_target = -1;
829 DECLARE_COMPLETION_ONSTACK(wait);
830
831 ENTER("qla1280_error_action");
832
833 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
834 sp = (struct srb *)CMD_SP(cmd);
835 bus = SCSI_BUS_32(cmd);
836 target = SCSI_TCN_32(cmd);
837 lun = SCSI_LUN_32(cmd);
838
839 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
840 RD_REG_WORD(&ha->iobase->istatus));
841
842 dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
843 RD_REG_WORD(&ha->iobase->host_cmd),
844 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
845
846 if (qla1280_verbose)
847 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
848 "Handle=0x%p, action=0x%x\n",
849 ha->host_no, cmd, CMD_HANDLE(cmd), action);
850
/*
 * See whether the command is still in the outstanding array; if it is,
 * hook up a completion so it can be waited for below.
 */
857 found = -1;
858 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
859 if (sp == ha->outstanding_cmds[i]) {
860 found = i;
861 sp->wait = &wait;
862 break;
863 }
864 }
865
866 if (found < 0) {
867 result = SUCCESS;
868 if (qla1280_verbose) {
869 printk(KERN_INFO
870 "scsi(%ld:%d:%d:%d): specified command has "
871 "already completed.\n", ha->host_no, bus,
872 target, lun);
873 }
874 }
875
876 switch (action) {
877
878 case ABORT_COMMAND:
879 dprintk(1, "qla1280: RISC aborting command\n");
/* If the command is still outstanding, ask the firmware to abort it. */
885 if (found >= 0)
886 qla1280_abort_command(ha, sp, found);
887 break;
888
889 case DEVICE_RESET:
890 if (qla1280_verbose)
891 printk(KERN_INFO
892 "scsi(%ld:%d:%d:%d): Queueing device reset "
893 "command.\n", ha->host_no, bus, target, lun);
894 if (qla1280_device_reset(ha, bus, target) == 0) {
895
896 wait_for_bus = bus;
897 wait_for_target = target;
898 }
899 break;
900
901 case BUS_RESET:
902 if (qla1280_verbose)
903 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
904 "reset.\n", ha->host_no, bus);
905 if (qla1280_bus_reset(ha, bus) == 0) {
906
907 wait_for_bus = bus;
908 }
909 break;
910
911 case ADAPTER_RESET:
912 default:
913 if (qla1280_verbose) {
914 printk(KERN_INFO
915 "scsi(%ld): Issued ADAPTER RESET\n",
916 ha->host_no);
917 printk(KERN_INFO "scsi(%ld): I/O processing will "
918 "continue automatically\n", ha->host_no);
919 }
920 ha->flags.reset_active = 1;
921
922 if (qla1280_abort_isp(ha) != 0) {
923 result = FAILED;
924 }
925
926 ha->flags.reset_active = 0;
927 }
/*
 * If the command was found above, wait for it to complete before
 * reporting the result of the abort or reset.
 */
938 if (found >= 0)
939 result = _qla1280_wait_for_single_command(ha, sp, &wait);
940
941 if (action == ABORT_COMMAND && result != SUCCESS) {
942 printk(KERN_WARNING
943 "scsi(%li:%i:%i:%i): "
944 "Unable to abort command!\n",
945 ha->host_no, bus, target, lun);
946 }
947
/*
 * After a successful device or bus reset, also wait for any other
 * commands that were outstanding on the affected bus/target.
 */
958 if (result == SUCCESS && wait_for_bus >= 0) {
959 result = qla1280_wait_for_pending_commands(ha,
960 wait_for_bus, wait_for_target);
961 }
962
963 dprintk(1, "RESET returning %d\n", result);
964
965 LEAVE("qla1280_error_action");
966 return result;
967 }
968
/*
 * SCSI midlayer error-handler entry points.  Each one takes the host
 * lock and delegates to qla1280_error_action().
 */
973 static int
974 qla1280_eh_abort(struct scsi_cmnd * cmd)
975 {
976 int rc;
977
978 spin_lock_irq(cmd->device->host->host_lock);
979 rc = qla1280_error_action(cmd, ABORT_COMMAND);
980 spin_unlock_irq(cmd->device->host->host_lock);
981
982 return rc;
983 }
984
989 static int
990 qla1280_eh_device_reset(struct scsi_cmnd *cmd)
991 {
992 int rc;
993
994 spin_lock_irq(cmd->device->host->host_lock);
995 rc = qla1280_error_action(cmd, DEVICE_RESET);
996 spin_unlock_irq(cmd->device->host->host_lock);
997
998 return rc;
999 }
1000
1005 static int
1006 qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
1007 {
1008 int rc;
1009
1010 spin_lock_irq(cmd->device->host->host_lock);
1011 rc = qla1280_error_action(cmd, BUS_RESET);
1012 spin_unlock_irq(cmd->device->host->host_lock);
1013
1014 return rc;
1015 }
1016
1021 static int
1022 qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
1023 {
1024 int rc;
1025
1026 spin_lock_irq(cmd->device->host->host_lock);
1027 rc = qla1280_error_action(cmd, ADAPTER_RESET);
1028 spin_unlock_irq(cmd->device->host->host_lock);
1029
1030 return rc;
1031 }
1032
1033 static int
1034 qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1035 sector_t capacity, int geom[])
1036 {
1037 int heads, sectors, cylinders;
1038
1039 heads = 64;
1040 sectors = 32;
1041 cylinders = (unsigned long)capacity / (heads * sectors);
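/* Fall back to the extended 255/63 translation for drives that would exceed 1024 cylinders with the default 64/32 geometry. */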
1042 if (cylinders > 1024) {
1043 heads = 255;
1044 sectors = 63;
1045 cylinders = (unsigned long)capacity / (heads * sectors);
1046
1047
1048 }
1049
1050 geom[0] = heads;
1051 geom[1] = sectors;
1052 geom[2] = cylinders;
1053
1054 return 0;
1055 }
1056
1057
1058
1059 static inline void
1060 qla1280_disable_intrs(struct scsi_qla_host *ha)
1061 {
1062 WRT_REG_WORD(&ha->iobase->ictrl, 0);
1063 RD_REG_WORD(&ha->iobase->ictrl);
1064 }
1065
1066
1067 static inline void
1068 qla1280_enable_intrs(struct scsi_qla_host *ha)
1069 {
1070 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1071 RD_REG_WORD(&ha->iobase->ictrl);
1072 }
/*
 * qla1280_intr_handler
 *      Hardware interrupt handler: service any pending RISC interrupt
 *      and complete commands on the done queue.
 */
1078 static irqreturn_t
1079 qla1280_intr_handler(int irq, void *dev_id)
1080 {
1081 struct scsi_qla_host *ha;
1082 struct device_reg __iomem *reg;
1083 u16 data;
1084 int handled = 0;
1085
1086 ENTER_INTR ("qla1280_intr_handler");
1087 ha = (struct scsi_qla_host *)dev_id;
1088
1089 spin_lock(ha->host->host_lock);
1090
1091 ha->isr_count++;
1092 reg = ha->iobase;
1093
1094 qla1280_disable_intrs(ha);
1095
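/* Read (and debounce) the interrupt status; only a RISC interrupt is serviced here. */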
1096 data = qla1280_debounce_register(&reg->istatus);
1097
1098 if (data & RISC_INT) {
1099 qla1280_isr(ha, &ha->done_q);
1100 handled = 1;
1101 }
1102 if (!list_empty(&ha->done_q))
1103 qla1280_done(ha);
1104
1105 spin_unlock(ha->host->host_lock);
1106
1107 qla1280_enable_intrs(ha);
1108
1109 LEAVE_INTR("qla1280_intr_handler");
1110 return IRQ_RETVAL(handled);
1111 }
1112
1113
1114 static int
1115 qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1116 {
1117 uint8_t mr;
1118 uint16_t mb[MAILBOX_REGISTER_COUNT];
1119 struct nvram *nv;
1120 int status, lun;
1121
1122 nv = &ha->nvram;
1123
1124 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
1125
1126
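/* Build a Set Target Parameters mailbox command from the NVRAM settings for this bus/target. */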
1127 mb[0] = MBC_SET_TARGET_PARAMETERS;
1128 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1129 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
1130 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
1131 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
1132 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
1133 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
1134 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
1135 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
1136 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
1137
1138 if (IS_ISP1x160(ha)) {
1139 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
1140 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
1141 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
1142 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
1143 mr |= BIT_6;
1144 } else {
1145 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
1146 }
1147 mb[3] |= nv->bus[bus].target[target].sync_period;
1148
1149 status = qla1280_mailbox_command(ha, mr, mb);
1150
1151
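/* Program the queue depth and execution throttle for every LUN on this target. */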
1152 for (lun = 0; lun < MAX_LUNS; lun++) {
1153 mb[0] = MBC_SET_DEVICE_QUEUE;
1154 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1155 mb[1] |= lun;
1156 mb[2] = nv->bus[bus].max_queue_depth;
1157 mb[3] = nv->bus[bus].target[target].execution_throttle;
1158 status |= qla1280_mailbox_command(ha, 0x0f, mb);
1159 }
1160
1161 if (status)
1162 printk(KERN_WARNING "scsi(%ld:%i:%i): "
1163 "qla1280_set_target_parameters() failed\n",
1164 ha->host_no, bus, target);
1165 return status;
1166 }
1167
/*
 * qla1280_slave_configure
 *      Set up the per-device queue depth and (re)negotiate sync, wide and
 *      PPR according to the NVRAM settings and module parameters.
 */
1180 static int
1181 qla1280_slave_configure(struct scsi_device *device)
1182 {
1183 struct scsi_qla_host *ha;
1184 int default_depth = 3;
1185 int bus = device->channel;
1186 int target = device->id;
1187 int status = 0;
1188 struct nvram *nv;
1189 unsigned long flags;
1190
1191 ha = (struct scsi_qla_host *)device->host->hostdata;
1192 nv = &ha->nvram;
1193
1194 if (qla1280_check_for_dead_scsi_bus(ha, bus))
1195 return 1;
1196
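/* Use the NVRAM high-water mark as the queue depth when both the target and the NVRAM settings allow tagged queuing; otherwise use a small default. */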
1197 if (device->tagged_supported &&
1198 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1199 scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat);
1200 } else {
1201 scsi_change_queue_depth(device, default_depth);
1202 }
1203
1204 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1205 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1206 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1207
1208 if (driver_setup.no_sync ||
1209 (driver_setup.sync_mask &&
1210 (~driver_setup.sync_mask & (1 << target))))
1211 nv->bus[bus].target[target].parameter.enable_sync = 0;
1212 if (driver_setup.no_wide ||
1213 (driver_setup.wide_mask &&
1214 (~driver_setup.wide_mask & (1 << target))))
1215 nv->bus[bus].target[target].parameter.enable_wide = 0;
1216 if (IS_ISP1x160(ha)) {
1217 if (driver_setup.no_ppr ||
1218 (driver_setup.ppr_mask &&
1219 (~driver_setup.ppr_mask & (1 << target))))
1220 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
1221 }
1222
1223 spin_lock_irqsave(ha->host->host_lock, flags);
1224 if (nv->bus[bus].target[target].parameter.enable_sync)
1225 status = qla1280_set_target_parameters(ha, bus, target);
1226 qla1280_get_target_parameters(ha, device);
1227 spin_unlock_irqrestore(ha->host->host_lock, flags);
1228 return status;
1229 }
/*
 * qla1280_done
 *      Drain the done queue: unmap DMA, note aborts and resets, and hand
 *      each finished command back to the midlayer or its waiter.
 */
1239 static void
1240 qla1280_done(struct scsi_qla_host *ha)
1241 {
1242 struct srb *sp;
1243 struct list_head *done_q;
1244 int bus, target, lun;
1245 struct scsi_cmnd *cmd;
1246
1247 ENTER("qla1280_done");
1248
1249 done_q = &ha->done_q;
1250
1251 while (!list_empty(done_q)) {
1252 sp = list_entry(done_q->next, struct srb, list);
1253
1254 list_del(&sp->list);
1255
1256 cmd = sp->cmd;
1257 bus = SCSI_BUS_32(cmd);
1258 target = SCSI_TCN_32(cmd);
1259 lun = SCSI_LUN_32(cmd);
1260
1261 switch ((CMD_RESULT(cmd) >> 16)) {
1262 case DID_RESET:
1263
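/* Send a marker so the firmware resynchronizes with the target after the reset, unless an ISP abort is already in progress. */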
1264 if (!ha->flags.abort_isp_active)
1265 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1266 break;
1267 case DID_ABORT:
1268 sp->flags &= ~SRB_ABORT_PENDING;
1269 sp->flags |= SRB_ABORTED;
1270 break;
1271 default:
1272 break;
1273 }
1274
1275
1276 scsi_dma_unmap(cmd);
1277
1278
1279 ha->actthreads--;
1280
1281 if (sp->wait == NULL)
1282 (*(cmd)->scsi_done)(cmd);
1283 else
1284 complete(sp->wait);
1285 }
1286 LEAVE("qla1280_done");
1287 }
/* Translate the ISP status entry into a SCSI midlayer result code. */
1292 static int
1293 qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1294 {
1295 int host_status = DID_ERROR;
1296 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1297 uint16_t state_flags = le16_to_cpu(sts->state_flags);
1298 uint32_t residual_length = le32_to_cpu(sts->residual_length);
1299 uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1300 #if DEBUG_QLA1280_INTR
1301 static char *reason[] = {
1302 "DID_OK",
1303 "DID_NO_CONNECT",
1304 "DID_BUS_BUSY",
1305 "DID_TIME_OUT",
1306 "DID_BAD_TARGET",
1307 "DID_ABORT",
1308 "DID_PARITY",
1309 "DID_ERROR",
1310 "DID_RESET",
1311 "DID_BAD_INTR"
1312 };
1313 #endif
1314
1315 ENTER("qla1280_return_status");
1316
1317 #if DEBUG_QLA1280_INTR
1318
1319
1320
1321
1322 #endif
1323
1324 switch (comp_status) {
1325 case CS_COMPLETE:
1326 host_status = DID_OK;
1327 break;
1328
1329 case CS_INCOMPLETE:
1330 if (!(state_flags & SF_GOT_BUS))
1331 host_status = DID_NO_CONNECT;
1332 else if (!(state_flags & SF_GOT_TARGET))
1333 host_status = DID_BAD_TARGET;
1334 else if (!(state_flags & SF_SENT_CDB))
1335 host_status = DID_ERROR;
1336 else if (!(state_flags & SF_TRANSFERRED_DATA))
1337 host_status = DID_ERROR;
1338 else if (!(state_flags & SF_GOT_STATUS))
1339 host_status = DID_ERROR;
1340 else if (!(state_flags & SF_GOT_SENSE))
1341 host_status = DID_ERROR;
1342 break;
1343
1344 case CS_RESET:
1345 host_status = DID_RESET;
1346 break;
1347
1348 case CS_ABORTED:
1349 host_status = DID_ABORT;
1350 break;
1351
1352 case CS_TIMEOUT:
1353 host_status = DID_TIME_OUT;
1354 break;
1355
1356 case CS_DATA_OVERRUN:
1357 dprintk(2, "Data overrun 0x%x\n", residual_length);
1358 dprintk(2, "qla1280_return_status: response packet data\n");
1359 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1360 host_status = DID_ERROR;
1361 break;
1362
1363 case CS_DATA_UNDERRUN:
1364 if ((scsi_bufflen(cp) - residual_length) <
1365 cp->underflow) {
1366 printk(KERN_WARNING
1367 "scsi: Underflow detected - retrying "
1368 "command.\n");
1369 host_status = DID_ERROR;
1370 } else {
1371 scsi_set_resid(cp, residual_length);
1372 host_status = DID_OK;
1373 }
1374 break;
1375
1376 default:
1377 host_status = DID_ERROR;
1378 break;
1379 }
1380
1381 #if DEBUG_QLA1280_INTR
1382 dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
1383 reason[host_status], scsi_status);
1384 #endif
1385
1386 LEAVE("qla1280_return_status");
1387
1388 return (scsi_status & 0xff) | (host_status << 16);
1389 }
/*
 * qla1280_initialize_adapter
 *      Bring the adapter online: read the NVRAM, load and start the
 *      firmware, configure the chip, initialize the rings and reset the
 *      SCSI buses.
 *
 * Returns 0 on success.
 */
1405 static int
1406 qla1280_initialize_adapter(struct scsi_qla_host *ha)
1407 {
1408 struct device_reg __iomem *reg;
1409 int status;
1410 int bus;
1411 unsigned long flags;
1412
1413 ENTER("qla1280_initialize_adapter");
1414
1415
1416 ha->flags.online = 0;
1417 ha->flags.disable_host_adapter = 0;
1418 ha->flags.reset_active = 0;
1419 ha->flags.abort_isp_active = 0;
1420
1421
1422 if (IS_ISP1040(ha))
1423 driver_setup.no_nvram = 1;
1424
1425 dprintk(1, "Configure PCI space for adapter...\n");
1426
1427 reg = ha->iobase;
1428
1429
1430 WRT_REG_WORD(&reg->semaphore, 0);
1431 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
1432 WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
1433 RD_REG_WORD(&reg->host_cmd);
1434
1435 if (qla1280_read_nvram(ha)) {
1436 dprintk(2, "qla1280_initialize_adapter: failed to read "
1437 "NVRAM\n");
1438 }
1445 spin_lock_irqsave(ha->host->host_lock, flags);
1446
1447 status = qla1280_load_firmware(ha);
1448 if (status) {
1449 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
1450 ha->host_no);
1451 goto out;
1452 }
1453
1454
1455 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
1456 qla1280_nvram_config(ha);
1457
1458 if (ha->flags.disable_host_adapter) {
1459 status = 1;
1460 goto out;
1461 }
1462
1463 status = qla1280_init_rings(ha);
1464 if (status)
1465 goto out;
1466
1467
1468 for (bus = 0; bus < ha->ports; bus++) {
1469 if (!ha->bus_settings[bus].disable_scsi_reset &&
1470 qla1280_bus_reset(ha, bus) &&
1471 qla1280_bus_reset(ha, bus))
1472 ha->bus_settings[bus].scsi_bus_dead = 1;
1473 }
1474
1475 ha->flags.online = 1;
1476 out:
1477 spin_unlock_irqrestore(ha->host->host_lock, flags);
1478
1479 if (status)
1480 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
1481
1482 LEAVE("qla1280_initialize_adapter");
1483 return status;
1484 }
1485
/*
 * qla1280_request_firmware
 *      Return the firmware image for this board, loading and caching it
 *      on first use.  Returns an ERR_PTR() on failure.
 */
1498 static const struct firmware *
1499 qla1280_request_firmware(struct scsi_qla_host *ha)
1500 {
1501 const struct firmware *fw;
1502 int err;
1503 int index;
1504 char *fwname;
1505
1506 spin_unlock_irq(ha->host->host_lock);
1507 mutex_lock(&qla1280_firmware_mutex);
1508
1509 index = ql1280_board_tbl[ha->devnum].fw_index;
1510 fw = qla1280_fw_tbl[index].fw;
1511 if (fw)
1512 goto out;
1513
1514 fwname = qla1280_fw_tbl[index].fwname;
1515 err = request_firmware(&fw, fwname, &ha->pdev->dev);
1516
1517 if (err) {
1518 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1519 fwname, err);
1520 fw = ERR_PTR(err);
1521 goto unlock;
1522 }
1523 if ((fw->size % 2) || (fw->size < 6)) {
1524 printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
1525 fw->size, fwname);
1526 release_firmware(fw);
1527 fw = ERR_PTR(-EINVAL);
1528 goto unlock;
1529 }
1530
1531 qla1280_fw_tbl[index].fw = fw;
1532
1533 out:
1534 ha->fwver1 = fw->data[0];
1535 ha->fwver2 = fw->data[1];
1536 ha->fwver3 = fw->data[2];
1537 unlock:
1538 mutex_unlock(&qla1280_firmware_mutex);
1539 spin_lock_irq(ha->host->host_lock);
1540 return fw;
1541 }
1542
/*
 * qla1280_chip_diag
 *      Reset the ISP, verify its product ID and run the mailbox register
 *      test.
 */
1553 static int
1554 qla1280_chip_diag(struct scsi_qla_host *ha)
1555 {
1556 uint16_t mb[MAILBOX_REGISTER_COUNT];
1557 struct device_reg __iomem *reg = ha->iobase;
1558 int status = 0;
1559 int cnt;
1560 uint16_t data;
1561 dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);
1562
1563 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
1564
1565
1566 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
1567
1575 udelay(20);
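/* Wait for the reset bit to clear; the loop below allows roughly five seconds (1,000,000 iterations of 5us). */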
1576 data = qla1280_debounce_register(&reg->ictrl);
1577
1578
1579
1580 for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
1581 udelay(5);
1582 data = RD_REG_WORD(&reg->ictrl);
1583 }
1584
1585 if (!cnt)
1586 goto fail;
1587
1588
1589 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
1590
1591 WRT_REG_WORD(&reg->cfg_1, 0);
1592
1593
1594
1595 WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
1596 HC_RELEASE_RISC | HC_DISABLE_BIOS);
1597
1598 RD_REG_WORD(&reg->id_l);
1599 data = qla1280_debounce_register(&reg->mailbox0);
1600
1601
1602
1603
1604 for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
1605 udelay(5);
1606 data = RD_REG_WORD(&reg->mailbox0);
1607 }
1608
1609 if (!cnt)
1610 goto fail;
1611
1612
1613 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
1614
1615 if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
1616 (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
1617 RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
1618 RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
1619 RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
1620 printk(KERN_INFO "qla1280: Wrong product ID = "
1621 "0x%x,0x%x,0x%x,0x%x\n",
1622 RD_REG_WORD(&reg->mailbox1),
1623 RD_REG_WORD(&reg->mailbox2),
1624 RD_REG_WORD(&reg->mailbox3),
1625 RD_REG_WORD(&reg->mailbox4));
1626 goto fail;
1627 }
1628
1629
1630
1631
1632 qla1280_enable_intrs(ha);
1633
1634 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
1635
1636 mb[0] = MBC_MAILBOX_REGISTER_TEST;
1637 mb[1] = 0xAAAA;
1638 mb[2] = 0x5555;
1639 mb[3] = 0xAA55;
1640 mb[4] = 0x55AA;
1641 mb[5] = 0xA5A5;
1642 mb[6] = 0x5A5A;
1643 mb[7] = 0x2525;
1644
1645 status = qla1280_mailbox_command(ha, 0xff, mb);
1646 if (status)
1647 goto fail;
1648
1649 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
1650 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
1651 mb[7] != 0x2525) {
1652 printk(KERN_INFO "qla1280: Failed mbox check\n");
1653 goto fail;
1654 }
1655
1656 dprintk(3, "qla1280_chip_diag: exiting normally\n");
1657 return 0;
1658 fail:
1659 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
1660 return status;
1661 }
1662
1663 static int
1664 qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1665 {
1666
1667
1668 const struct firmware *fw;
1669 const __le16 *fw_data;
1670 uint16_t risc_address, risc_code_size;
1671 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1672 int err = 0;
1673
1674 fw = qla1280_request_firmware(ha);
1675 if (IS_ERR(fw))
1676 return PTR_ERR(fw);
1677
1678 fw_data = (const __le16 *)&fw->data[0];
1679 ha->fwstart = __le16_to_cpu(fw_data[2]);
1680
1681
1682 risc_address = ha->fwstart;
1683 fw_data = (const __le16 *)&fw->data[6];
1684 risc_code_size = (fw->size - 6) / 2;
1685
1686 for (i = 0; i < risc_code_size; i++) {
1687 mb[0] = MBC_WRITE_RAM_WORD;
1688 mb[1] = risc_address + i;
1689 mb[2] = __le16_to_cpu(fw_data[i]);
1690
1691 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
1692 if (err) {
1693 printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1694 ha->host_no);
1695 break;
1696 }
1697 }
1698
1699 return err;
1700 }
1701
1702 #define DUMP_IT_BACK 0
1703 static int
1704 qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1705 {
1706
1707 const struct firmware *fw;
1708 const __le16 *fw_data;
1709 uint16_t risc_address, risc_code_size;
1710 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1711 int err = 0, num, i;
1712 #if DUMP_IT_BACK
1713 uint8_t *sp, *tbuf;
1714 dma_addr_t p_tbuf;
1715
1716 tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL);
1717 if (!tbuf)
1718 return -ENOMEM;
1719 #endif
1720
1721 fw = qla1280_request_firmware(ha);
1722 if (IS_ERR(fw))
1723 return PTR_ERR(fw);
1724
1725 fw_data = (const __le16 *)&fw->data[0];
1726 ha->fwstart = __le16_to_cpu(fw_data[2]);
1727
1728
1729 risc_address = ha->fwstart;
1730 fw_data = (const __le16 *)&fw->data[6];
1731 risc_code_size = (fw->size - 6) / 2;
1732
1733 dprintk(1, "%s: DMA RISC code (%i) words\n",
1734 __func__, risc_code_size);
1735
1736 num = 0;
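/* Load the firmware in chunks: copy up to 1000 words into the request ring buffer, then have the RISC pull each chunk into its RAM with a LOAD RAM mailbox command. */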
1737 while (risc_code_size > 0) {
1738 int warn __attribute__((unused)) = 0;
1739
1740 cnt = 2000 >> 1;
1741
1742 if (cnt > risc_code_size)
1743 cnt = risc_code_size;
1744
1745 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1746 "%d,%d(0x%x)\n",
1747 fw_data, cnt, num, risc_address);
1748 for(i = 0; i < cnt; i++)
1749 ((__le16 *)ha->request_ring)[i] = fw_data[i];
1750
1751 mb[0] = MBC_LOAD_RAM;
1752 mb[1] = risc_address;
1753 mb[4] = cnt;
1754 mb[3] = ha->request_dma & 0xffff;
1755 mb[2] = (ha->request_dma >> 16) & 0xffff;
1756 mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
1757 mb[6] = upper_32_bits(ha->request_dma) >> 16;
1758 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1759 __func__, mb[0],
1760 (void *)(long)ha->request_dma,
1761 mb[6], mb[7], mb[2], mb[3]);
1762 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1763 BIT_1 | BIT_0, mb);
1764 if (err) {
1765 printk(KERN_ERR "scsi(%li): Failed to load partial "
1766 "segment of f\n", ha->host_no);
1767 goto out;
1768 }
1769
1770 #if DUMP_IT_BACK
1771 mb[0] = MBC_DUMP_RAM;
1772 mb[1] = risc_address;
1773 mb[4] = cnt;
1774 mb[3] = p_tbuf & 0xffff;
1775 mb[2] = (p_tbuf >> 16) & 0xffff;
1776 mb[7] = upper_32_bits(p_tbuf) & 0xffff;
1777 mb[6] = upper_32_bits(p_tbuf) >> 16;
1778
1779 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1780 BIT_1 | BIT_0, mb);
1781 if (err) {
1782 printk(KERN_ERR
1783 "Failed to dump partial segment of f/w\n");
1784 goto out;
1785 }
1786 sp = (uint8_t *)ha->request_ring;
1787 for (i = 0; i < (cnt << 1); i++) {
1788 if (tbuf[i] != sp[i] && warn++ < 10) {
1789 printk(KERN_ERR "%s: FW compare error @ "
1790 "byte(0x%x) loop#=%x\n",
1791 __func__, i, num);
1792 printk(KERN_ERR "%s: FWbyte=%x "
1793 "FWfromChip=%x\n",
1794 __func__, sp[i], tbuf[i]);
1795
1796 }
1797 }
1798 #endif
1799 risc_address += cnt;
1800 risc_code_size = risc_code_size - cnt;
1801 fw_data = fw_data + cnt;
1802 num++;
1803 }
1804
1805 out:
1806 #if DUMP_IT_BACK
1807 dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf);
1808 #endif
1809 return err;
1810 }
1811
1812 static int
1813 qla1280_start_firmware(struct scsi_qla_host *ha)
1814 {
1815 uint16_t mb[MAILBOX_REGISTER_COUNT];
1816 int err;
1817
1818 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1819 __func__);
1820
1821
1822 mb[0] = MBC_VERIFY_CHECKSUM;
1823
1824 mb[1] = ha->fwstart;
1825 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1826 if (err) {
1827 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
1828 return err;
1829 }
1830
1831
1832 dprintk(1, "%s: start firmware running.\n", __func__);
1833 mb[0] = MBC_EXECUTE_FIRMWARE;
1834 mb[1] = ha->fwstart;
1835 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1836 if (err) {
1837 printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
1838 ha->host_no);
1839 }
1840
1841 return err;
1842 }
1843
1844 static int
1845 qla1280_load_firmware(struct scsi_qla_host *ha)
1846 {
1847
1848 int err;
1849
1850 err = qla1280_chip_diag(ha);
1851 if (err)
1852 goto out;
1853 if (IS_ISP1040(ha))
1854 err = qla1280_load_firmware_pio(ha);
1855 else
1856 err = qla1280_load_firmware_dma(ha);
1857 if (err)
1858 goto out;
1859 err = qla1280_start_firmware(ha);
1860 out:
1861 return err;
1862 }
/*
 * qla1280_init_rings
 *      Clear the outstanding command array and tell the firmware where
 *      the request and response rings are located.
 */
1877 static int
1878 qla1280_init_rings(struct scsi_qla_host *ha)
1879 {
1880 uint16_t mb[MAILBOX_REGISTER_COUNT];
1881 int status = 0;
1882
1883 ENTER("qla1280_init_rings");
1884
1885
1886 memset(ha->outstanding_cmds, 0,
1887 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1888
1889
1890 ha->request_ring_ptr = ha->request_ring;
1891 ha->req_ring_index = 0;
1892 ha->req_q_cnt = REQUEST_ENTRY_CNT;
1893
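/* Hand the request ring address (split across mb[2,3,6,7]) and entry count to the firmware. */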
1894 mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1895 mb[1] = REQUEST_ENTRY_CNT;
1896 mb[3] = ha->request_dma & 0xffff;
1897 mb[2] = (ha->request_dma >> 16) & 0xffff;
1898 mb[4] = 0;
1899 mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
1900 mb[6] = upper_32_bits(ha->request_dma) >> 16;
1901 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1902 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1903 &mb[0]))) {
1904
1905 ha->response_ring_ptr = ha->response_ring;
1906 ha->rsp_ring_index = 0;
1907
1908 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1909 mb[1] = RESPONSE_ENTRY_CNT;
1910 mb[3] = ha->response_dma & 0xffff;
1911 mb[2] = (ha->response_dma >> 16) & 0xffff;
1912 mb[5] = 0;
1913 mb[7] = upper_32_bits(ha->response_dma) & 0xffff;
1914 mb[6] = upper_32_bits(ha->response_dma) >> 16;
1915 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1916 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1917 &mb[0]);
1918 }
1919
1920 if (status)
1921 dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1922
1923 LEAVE("qla1280_init_rings");
1924 return status;
1925 }
1926
1927 static void
1928 qla1280_print_settings(struct nvram *nv)
1929 {
1930 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
1931 nv->bus[0].config_1.initiator_id);
1932 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
1933 nv->bus[1].config_1.initiator_id);
1934
1935 dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
1936 nv->bus[0].bus_reset_delay);
1937 dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
1938 nv->bus[1].bus_reset_delay);
1939
1940 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
1941 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
1942 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
1943 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
1944
1945 dprintk(1, "qla1280 : async data setup time[0]=%d\n",
1946 nv->bus[0].config_2.async_data_setup_time);
1947 dprintk(1, "qla1280 : async data setup time[1]=%d\n",
1948 nv->bus[1].config_2.async_data_setup_time);
1949
1950 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
1951 nv->bus[0].config_2.req_ack_active_negation);
1952 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
1953 nv->bus[1].config_2.req_ack_active_negation);
1954
1955 dprintk(1, "qla1280 : data line active negation[0]=%d\n",
1956 nv->bus[0].config_2.data_line_active_negation);
1957 dprintk(1, "qla1280 : data line active negation[1]=%d\n",
1958 nv->bus[1].config_2.data_line_active_negation);
1959
1960 dprintk(1, "qla1280 : disable loading risc code=%d\n",
1961 nv->cntr_flags_1.disable_loading_risc_code);
1962
1963 dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
1964 nv->cntr_flags_1.enable_64bit_addressing);
1965
1966 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
1967 nv->bus[0].selection_timeout);
1968 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
1969 nv->bus[1].selection_timeout);
1970
1971 dprintk(1, "qla1280 : max queue depth[0]=%d\n",
1972 nv->bus[0].max_queue_depth);
1973 dprintk(1, "qla1280 : max queue depth[1]=%d\n",
1974 nv->bus[1].max_queue_depth);
1975 }
1976
1977 static void
1978 qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
1979 {
1980 struct nvram *nv = &ha->nvram;
1981
1982 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
1983 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
1984 nv->bus[bus].target[target].parameter.tag_queuing = 1;
1985 nv->bus[bus].target[target].parameter.enable_sync = 1;
1986 #if 1
1987 nv->bus[bus].target[target].parameter.enable_wide = 1;
1988 #endif
1989 nv->bus[bus].target[target].execution_throttle =
1990 nv->bus[bus].max_queue_depth - 1;
1991 nv->bus[bus].target[target].parameter.parity_checking = 1;
1992 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
1993
1994 if (IS_ISP1x160(ha)) {
1995 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
1996 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
1997 nv->bus[bus].target[target].sync_period = 9;
1998 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
1999 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
2000 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
2001 } else {
2002 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
2003 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
2004 nv->bus[bus].target[target].sync_period = 10;
2005 }
2006 }
2007
2008 static void
2009 qla1280_set_defaults(struct scsi_qla_host *ha)
2010 {
2011 struct nvram *nv = &ha->nvram;
2012 int bus, target;
2013
2014 dprintk(1, "Using defaults for NVRAM: \n");
2015 memset(nv, 0, sizeof(struct nvram));
2016
2017
2018 nv->firmware_feature.f.enable_fast_posting = 1;
2019 nv->firmware_feature.f.disable_synchronous_backoff = 1;
2020 nv->termination.scsi_bus_0_control = 3;
2021 nv->termination.scsi_bus_1_control = 3;
2022 nv->termination.auto_term_support = 1;
2023
/* Enable bursting and pick a FIFO threshold appropriate for the chip generation. */
2032 nv->isp_config.burst_enable = 1;
2033 if (IS_ISP1040(ha))
2034 nv->isp_config.fifo_threshold |= 3;
2035 else
2036 nv->isp_config.fifo_threshold |= 4;
2037
2038 if (IS_ISP1x160(ha))
2039 nv->isp_parameter = 0x01;
2040
2041 for (bus = 0; bus < MAX_BUSES; bus++) {
2042 nv->bus[bus].config_1.initiator_id = 7;
2043 nv->bus[bus].config_2.req_ack_active_negation = 1;
2044 nv->bus[bus].config_2.data_line_active_negation = 1;
2045 nv->bus[bus].selection_timeout = 250;
2046 nv->bus[bus].max_queue_depth = 32;
2047
2048 if (IS_ISP1040(ha)) {
2049 nv->bus[bus].bus_reset_delay = 3;
2050 nv->bus[bus].config_2.async_data_setup_time = 6;
2051 nv->bus[bus].retry_delay = 1;
2052 } else {
2053 nv->bus[bus].bus_reset_delay = 5;
2054 nv->bus[bus].config_2.async_data_setup_time = 8;
2055 }
2056
2057 for (target = 0; target < MAX_TARGETS; target++)
2058 qla1280_set_target_defaults(ha, bus, target);
2059 }
2060 }
2061
2062 static int
2063 qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2064 {
2065 struct nvram *nv = &ha->nvram;
2066 uint16_t mb[MAILBOX_REGISTER_COUNT];
2067 int status, lun;
2068 uint16_t flag;
2069
2070
2071 mb[0] = MBC_SET_TARGET_PARAMETERS;
2072 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
/*
 * Enable renegotiation, auto request sense, tagged queuing, wide, parity
 * and disconnect for the initial configuration; sync and PPR are set up
 * later from slave_configure.
 */
2079 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2080 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2081
2082 if (IS_ISP1x160(ha))
2083 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2084 else
2085 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2086 mb[3] |= nv->bus[bus].target[target].sync_period;
2087 status = qla1280_mailbox_command(ha, 0x0f, mb);
2088
2089
2090 flag = (BIT_0 << target);
2091 if (nv->bus[bus].target[target].parameter.tag_queuing)
2092 ha->bus_settings[bus].qtag_enables |= flag;
2093
2094
2095 if (IS_ISP1x160(ha)) {
2096 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2097 ha->bus_settings[bus].device_enables |= flag;
2098 ha->bus_settings[bus].lun_disables |= 0;
2099 } else {
2100 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2101 ha->bus_settings[bus].device_enables |= flag;
2102
2103 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2104 ha->bus_settings[bus].lun_disables |= flag;
2105 }
2106
2107
2108 for (lun = 0; lun < MAX_LUNS; lun++) {
2109 mb[0] = MBC_SET_DEVICE_QUEUE;
2110 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2111 mb[1] |= lun;
2112 mb[2] = nv->bus[bus].max_queue_depth;
2113 mb[3] = nv->bus[bus].target[target].execution_throttle;
2114 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2115 }
2116
2117 return status;
2118 }
2119
2120 static int
2121 qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2122 {
2123 struct nvram *nv = &ha->nvram;
2124 uint16_t mb[MAILBOX_REGISTER_COUNT];
2125 int target, status;
2126
2127
2128 ha->bus_settings[bus].disable_scsi_reset =
2129 nv->bus[bus].config_1.scsi_reset_disable;
2130
2131
2132 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2133 mb[0] = MBC_SET_INITIATOR_ID;
2134 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2135 ha->bus_settings[bus].id;
2136 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2137
2138
2139 ha->bus_settings[bus].bus_reset_delay =
2140 nv->bus[bus].bus_reset_delay;
2141
2142
2143 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2144
2145
2146 for (target = 0; target < MAX_TARGETS; target++)
2147 status |= qla1280_config_target(ha, bus, target);
2148
2149 return status;
2150 }
2151
2152 static int
2153 qla1280_nvram_config(struct scsi_qla_host *ha)
2154 {
2155 struct device_reg __iomem *reg = ha->iobase;
2156 struct nvram *nv = &ha->nvram;
2157 int bus, target, status = 0;
2158 uint16_t mb[MAILBOX_REGISTER_COUNT];
2159
2160 ENTER("qla1280_nvram_config");
2161
2162 if (ha->nvram_valid) {
2163
2164 for (bus = 0; bus < MAX_BUSES; bus++)
2165 for (target = 0; target < MAX_TARGETS; target++) {
2166 nv->bus[bus].target[target].parameter.
2167 auto_request_sense = 1;
2168 }
2169 } else {
2170 qla1280_set_defaults(ha);
2171 }
2172
2173 qla1280_print_settings(nv);
2174
2175
2176 ha->flags.disable_risc_code_load =
2177 nv->cntr_flags_1.disable_loading_risc_code;
2178
2179 if (IS_ISP1040(ha)) {
2180 uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
2181
2182 hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
2183
2184 cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
2185 cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
2186 ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
2187
2188
2189 if (hwrev != ISP_CFG0_1040A)
2190 cfg1 |= nv->isp_config.fifo_threshold << 4;
2191
2192 cfg1 |= nv->isp_config.burst_enable << 2;
2193 WRT_REG_WORD(&reg->cfg_1, cfg1);
2194
2195 WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
2196 WRT_REG_WORD(&reg->ddma_cfg, cdma_conf | DDMA_CONF_BENAB);
2197 } else {
2198 uint16_t cfg1, term;
2199
2200
2201 cfg1 = nv->isp_config.fifo_threshold << 4;
2202 cfg1 |= nv->isp_config.burst_enable << 2;
2203
2204 if (ha->ports > 1)
2205 cfg1 |= BIT_13;
2206 WRT_REG_WORD(&reg->cfg_1, cfg1);
2207
2208
2209 WRT_REG_WORD(&reg->gpio_enable,
2210 BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2211 term = nv->termination.scsi_bus_1_control;
2212 term |= nv->termination.scsi_bus_0_control << 2;
2213 term |= nv->termination.auto_term_support << 7;
2214 RD_REG_WORD(&reg->id_l);
2215 WRT_REG_WORD(&reg->gpio_data, term);
2216 }
2217 RD_REG_WORD(&reg->id_l);
2218
2219
2220 mb[0] = MBC_SET_SYSTEM_PARAMETER;
2221 mb[1] = nv->isp_parameter;
2222 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2223
2224 if (IS_ISP1x40(ha)) {
2225
2226 mb[0] = MBC_SET_CLOCK_RATE;
2227 mb[1] = 40;
2228 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2229 }
2230
2231
2232 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2233 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2234 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2235 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2236 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2237
2238
2239 mb[0] = MBC_SET_RETRY_COUNT;
2240 mb[1] = nv->bus[0].retry_count;
2241 mb[2] = nv->bus[0].retry_delay;
2242 mb[6] = nv->bus[1].retry_count;
2243 mb[7] = nv->bus[1].retry_delay;
2244 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2245 BIT_1 | BIT_0, &mb[0]);
2246
2247
2248 mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2249 mb[1] = nv->bus[0].config_2.async_data_setup_time;
2250 mb[2] = nv->bus[1].config_2.async_data_setup_time;
2251 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2252
2253
2254 mb[0] = MBC_SET_ACTIVE_NEGATION;
2255 mb[1] = 0;
2256 if (nv->bus[0].config_2.req_ack_active_negation)
2257 mb[1] |= BIT_5;
2258 if (nv->bus[0].config_2.data_line_active_negation)
2259 mb[1] |= BIT_4;
2260 mb[2] = 0;
2261 if (nv->bus[1].config_2.req_ack_active_negation)
2262 mb[2] |= BIT_5;
2263 if (nv->bus[1].config_2.data_line_active_negation)
2264 mb[2] |= BIT_4;
2265 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2266
2267 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2268 mb[1] = 2;
2269 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2270
2271
2272 mb[0] = MBC_SET_PCI_CONTROL;
2273 mb[1] = BIT_1;
2274 mb[2] = BIT_1;
2275 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2276
2277 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2278 mb[1] = 8;
2279 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2280
2281
2282 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2283 mb[1] = nv->bus[0].selection_timeout;
2284 mb[2] = nv->bus[1].selection_timeout;
2285 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2286
2287 for (bus = 0; bus < ha->ports; bus++)
2288 status |= qla1280_config_bus(ha, bus);
2289
2290 if (status)
2291 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2292
2293 LEAVE("qla1280_nvram_config");
2294 return status;
2295 }
2296
/*
 * qla1280_get_nvram_word
 *      Read one 16-bit word from the NVRAM at the given address.
 */
2309 static uint16_t
2310 qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
2311 {
2312 uint32_t nv_cmd;
2313 uint16_t data;
2314
2315 nv_cmd = address << 16;
2316 nv_cmd |= NV_READ_OP;
2317
2318 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
2319
2320 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
2321 "0x%x", data);
2322
2323 return data;
2324 }
2325
/*
 * qla1280_nvram_request
 *      Bit-bang a command to the serial NVRAM and clock back the 16 data
 *      bits of the response.
 */
2340 static uint16_t
2341 qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
2342 {
2343 struct device_reg __iomem *reg = ha->iobase;
2344 int cnt;
2345 uint16_t data = 0;
2346 uint16_t reg_data;
2347
2348
2349
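/* Clock the start bit, opcode and address out to the NVRAM one bit at a time, MSB first. */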
2350 nv_cmd <<= 5;
2351 for (cnt = 0; cnt < 11; cnt++) {
2352 if (nv_cmd & BIT_31)
2353 qla1280_nv_write(ha, NV_DATA_OUT);
2354 else
2355 qla1280_nv_write(ha, 0);
2356 nv_cmd <<= 1;
2357 }
2358
2359
2360
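/* Clock in the 16 data bits, MSB first, toggling NV_CLOCK and sampling NV_DATA_IN. */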
2361 for (cnt = 0; cnt < 16; cnt++) {
2362 WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
2363 RD_REG_WORD(&reg->id_l);
2364 NVRAM_DELAY();
2365 data <<= 1;
2366 reg_data = RD_REG_WORD(&reg->nvram);
2367 if (reg_data & NV_DATA_IN)
2368 data |= BIT_0;
2369 WRT_REG_WORD(&reg->nvram, NV_SELECT);
2370 RD_REG_WORD(&reg->id_l);
2371 NVRAM_DELAY();
2372 }
2373
2374
2375
2376 WRT_REG_WORD(&reg->nvram, NV_DESELECT);
2377 RD_REG_WORD(&reg->id_l);
2378 NVRAM_DELAY();
2379
2380 return data;
2381 }
2382
2383 static void
2384 qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
2385 {
2386 struct device_reg __iomem *reg = ha->iobase;
2387
2388 WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2389 RD_REG_WORD(&reg->id_l);
2390 NVRAM_DELAY();
2391 WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
2392 RD_REG_WORD(&reg->id_l);
2393 NVRAM_DELAY();
2394 WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2395 RD_REG_WORD(&reg->id_l);
2396 NVRAM_DELAY();
2397 }
/*
 * qla1280_mailbox_command
 *      Issue a mailbox command and wait for it to complete.
 *
 *      mr = bitmask of the mailbox registers that carry parameters
 *      mb = array of MAILBOX_REGISTER_COUNT words, used for input and output
 *
 * Returns 0 on success, non-zero on failure.
 */
2414 static int
2415 qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2416 {
2417 struct device_reg __iomem *reg = ha->iobase;
2418 int status = 0;
2419 int cnt;
2420 uint16_t *optr, *iptr;
2421 uint16_t __iomem *mptr;
2422 uint16_t data;
2423 DECLARE_COMPLETION_ONSTACK(wait);
2424
2425 ENTER("qla1280_mailbox_command");
2426
2427 if (ha->mailbox_wait) {
2428 printk(KERN_ERR "Warning mailbox wait already in use!\n");
2429 }
2430 ha->mailbox_wait = &wait;
2431
2432
2433
2434
2435
2436
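/* Load the outgoing mailbox registers; each bit in 'mr' marks a mailbox that carries a parameter. */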
2437 mptr = (uint16_t __iomem *) &reg->mailbox0;
2438 iptr = mb;
2439 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2440 if (mr & BIT_0) {
2441 WRT_REG_WORD(mptr, (*iptr));
2442 }
2443
2444 mr >>= 1;
2445 mptr++;
2446 iptr++;
2447 }
2448
2449
2450
2451
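/* Arm a 20 second timeout so a wedged mailbox command completes the waiter instead of hanging forever. */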
2452 timer_setup(&ha->mailbox_timer, qla1280_mailbox_timeout, 0);
2453 mod_timer(&ha->mailbox_timer, jiffies + 20 * HZ);
2454
2455 spin_unlock_irq(ha->host->host_lock);
2456 WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
2457 data = qla1280_debounce_register(&reg->istatus);
2458
2459 wait_for_completion(&wait);
2460 del_timer_sync(&ha->mailbox_timer);
2461
2462 spin_lock_irq(ha->host->host_lock);
2463
2464 ha->mailbox_wait = NULL;
2465
2466
2467 if (ha->mailbox_out[0] != MBS_CMD_CMP) {
2468 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
2469 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
2470 "0x%04x\n",
2471 mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
2472 printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
2473 RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
2474 RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
2475 printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
2476 RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
2477 RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
2478 status = 1;
2479 }
2480
2481
2482 optr = mb;
2483 iptr = (uint16_t *) &ha->mailbox_out[0];
2484 mr = MAILBOX_REGISTER_COUNT;
2485 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2486
2487 if (ha->flags.reset_marker)
2488 qla1280_rst_aen(ha);
2489
2490 if (status)
2491 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2492 "0x%x ****\n", mb[0]);
2493
2494 LEAVE("qla1280_mailbox_command");
2495 return status;
2496 }
2497
2498
2499
2500
2501
2502
2503
2504
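/*
 * Polled-mode service routine: check istatus for a pending RISC interrupt,
 * run the ISR, issue any pending reset markers, and drain the done queue.
 */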
2505 static void
2506 qla1280_poll(struct scsi_qla_host *ha)
2507 {
2508 struct device_reg __iomem *reg = ha->iobase;
2509 uint16_t data;
2510 LIST_HEAD(done_q);
2511
2512
2513
2514
2515 data = RD_REG_WORD(&reg->istatus);
2516 if (data & RISC_INT)
2517 qla1280_isr(ha, &done_q);
2518
2519 if (!ha->mailbox_wait) {
2520 if (ha->flags.reset_marker)
2521 qla1280_rst_aen(ha);
2522 }
2523
2524 if (!list_empty(&done_q))
2525 qla1280_done(ha);
2526
2527
2528 }
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
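/*
 * Reset one SCSI bus via MBC_BUS_RESET. On success, wait out the configured
 * reset delay, clear the dead-bus bookkeeping and post a MK_SYNC_ALL marker;
 * on failure, bump failed_reset_count and mark the bus dead after repeated
 * failures.
 */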
2541 static int
2542 qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2543 {
2544 uint16_t mb[MAILBOX_REGISTER_COUNT];
2545 uint16_t reset_delay;
2546 int status;
2547
2548 dprintk(3, "qla1280_bus_reset: entered\n");
2549
2550 if (qla1280_verbose)
2551 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2552 ha->host_no, bus);
2553
2554 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2555 mb[0] = MBC_BUS_RESET;
2556 mb[1] = reset_delay;
2557 mb[2] = (uint16_t) bus;
2558 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2559
2560 if (status) {
2561 if (ha->bus_settings[bus].failed_reset_count > 2)
2562 ha->bus_settings[bus].scsi_bus_dead = 1;
2563 ha->bus_settings[bus].failed_reset_count++;
2564 } else {
2565 spin_unlock_irq(ha->host->host_lock);
2566 ssleep(reset_delay);
2567 spin_lock_irq(ha->host->host_lock);
2568
2569 ha->bus_settings[bus].scsi_bus_dead = 0;
2570 ha->bus_settings[bus].failed_reset_count = 0;
2571 ha->bus_settings[bus].reset_marker = 0;
2572
2573 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2574 }
2575
2576
2577
2578
2579
2580
2581 if (status)
2582 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2583 else
2584 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2585
2586 return status;
2587 }
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
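/*
 * Reset a single target with MBC_ABORT_TARGET and follow up with a
 * MK_SYNC_ID marker for that target.
 */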
2601 static int
2602 qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2603 {
2604 uint16_t mb[MAILBOX_REGISTER_COUNT];
2605 int status;
2606
2607 ENTER("qla1280_device_reset");
2608
2609 mb[0] = MBC_ABORT_TARGET;
2610 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2611 mb[2] = 1;
2612 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2613
2614
2615 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2616
2617 if (status)
2618 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2619
2620 LEAVE("qla1280_device_reset");
2621 return status;
2622 }
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
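/*
 * Abort one outstanding command, identified by its handle, using
 * MBC_ABORT_COMMAND. SRB_ABORT_PENDING is set before the mailbox command
 * and cleared again if the abort fails.
 */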
2635 static int
2636 qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2637 {
2638 uint16_t mb[MAILBOX_REGISTER_COUNT];
2639 unsigned int bus, target, lun;
2640 int status;
2641
2642 ENTER("qla1280_abort_command");
2643
2644 bus = SCSI_BUS_32(sp->cmd);
2645 target = SCSI_TCN_32(sp->cmd);
2646 lun = SCSI_LUN_32(sp->cmd);
2647
2648 sp->flags |= SRB_ABORT_PENDING;
2649
2650 mb[0] = MBC_ABORT_COMMAND;
2651 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2652 mb[2] = handle >> 16;
2653 mb[3] = handle & 0xffff;
2654 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2655
2656 if (status) {
2657 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2658 sp->flags &= ~SRB_ABORT_PENDING;
2659 }
2660
2661
2662 LEAVE("qla1280_abort_command");
2663 return status;
2664 }
2665
2666
2667
2668
2669
2670
2671
2672
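/*
 * Take the adapter offline: reset the ISP, then reset and release the RISC
 * with the BIOS disabled.
 */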
2673 static void
2674 qla1280_reset_adapter(struct scsi_qla_host *ha)
2675 {
2676 struct device_reg __iomem *reg = ha->iobase;
2677
2678 ENTER("qla1280_reset_adapter");
2679
2680
2681 ha->flags.online = 0;
2682 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
2683 WRT_REG_WORD(&reg->host_cmd,
2684 HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
2685 RD_REG_WORD(&reg->id_l);
2686
2687 LEAVE("qla1280_reset_adapter");
2688 }
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
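/*
 * Queue a marker IOCB so the firmware re-synchronizes after a reset;
 * 'type' selects the marker modifier (e.g. MK_SYNC_ID, MK_SYNC_ALL).
 */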
2701 static void
2702 qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2703 {
2704 struct mrk_entry *pkt;
2705
2706 ENTER("qla1280_marker");
2707
2708
2709 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2710 pkt->entry_type = MARKER_TYPE;
2711 pkt->lun = (uint8_t) lun;
2712 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2713 pkt->modifier = type;
2714 pkt->entry_status = 0;
2715
2716
2717 qla1280_isp_cmd(ha);
2718 }
2719
2720 LEAVE("qla1280_marker");
2721 }
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736 #ifdef QLA_64BIT_PTR
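/*
 * Build a 64-bit (COMMAND_A64_TYPE) command IOCB plus any continuation
 * entries for the scatter/gather list (two segments fit in the command
 * entry, five per continuation entry), reserve a slot in outstanding_cmds
 * and ring the request-queue doorbell (mailbox 4). Returns 0 on success or
 * SCSI_MLQUEUE_HOST_BUSY when the ring or handle array is full.
 */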
2737 static int
2738 qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2739 {
2740 struct device_reg __iomem *reg = ha->iobase;
2741 struct scsi_cmnd *cmd = sp->cmd;
2742 cmd_a64_entry_t *pkt;
2743 __le32 *dword_ptr;
2744 dma_addr_t dma_handle;
2745 int status = 0;
2746 int cnt;
2747 int req_cnt;
2748 int seg_cnt;
2749 u8 dir;
2750
2751 ENTER("qla1280_64bit_start_scsi:");
2752
2753
2754 req_cnt = 1;
2755 seg_cnt = scsi_dma_map(cmd);
2756 if (seg_cnt > 0) {
2757 if (seg_cnt > 2) {
2758 req_cnt += (seg_cnt - 2) / 5;
2759 if ((seg_cnt - 2) % 5)
2760 req_cnt++;
2761 }
2762 } else if (seg_cnt < 0) {
2763 status = 1;
2764 goto out;
2765 }
2766
2767 if ((req_cnt + 2) >= ha->req_q_cnt) {
2768
2769 cnt = RD_REG_WORD(&reg->mailbox4);
2770 if (ha->req_ring_index < cnt)
2771 ha->req_q_cnt = cnt - ha->req_ring_index;
2772 else
2773 ha->req_q_cnt =
2774 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2775 }
2776
2777 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2778 ha->req_q_cnt, seg_cnt);
2779
2780
2781 if ((req_cnt + 2) >= ha->req_q_cnt) {
2782 status = SCSI_MLQUEUE_HOST_BUSY;
2783 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2784 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2785 req_cnt);
2786 goto out;
2787 }
2788
2789
2790 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2791 ha->outstanding_cmds[cnt] != NULL; cnt++);
2792
2793 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2794 status = SCSI_MLQUEUE_HOST_BUSY;
2795 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2796 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2797 goto out;
2798 }
2799
2800 ha->outstanding_cmds[cnt] = sp;
2801 ha->req_q_cnt -= req_cnt;
2802 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2803
2804 dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
2805 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2806 dprintk(2, " bus %i, target %i, lun %i\n",
2807 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2808 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2809
2810
2811
2812
2813 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2814
2815 pkt->entry_type = COMMAND_A64_TYPE;
2816 pkt->entry_count = (uint8_t) req_cnt;
2817 pkt->sys_define = (uint8_t) ha->req_ring_index;
2818 pkt->entry_status = 0;
2819 pkt->handle = cpu_to_le32(cnt);
2820
2821
2822 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2823
2824
2825 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2826
2827
2828 pkt->lun = SCSI_LUN_32(cmd);
2829 pkt->target = SCSI_BUS_32(cmd) ?
2830 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2831
2832
2833 if (cmd->device->simple_tags)
2834 pkt->control_flags |= cpu_to_le16(BIT_3);
2835
2836
2837 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2838 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2839
2840
2841
2842 dir = qla1280_data_direction(cmd);
2843 pkt->control_flags |= cpu_to_le16(dir);
2844
2845
2846 pkt->dseg_count = cpu_to_le16(seg_cnt);
2847
2848
2849
2850
2851 if (seg_cnt) {
2852 struct scatterlist *sg, *s;
2853 int remseg = seg_cnt;
2854
2855 sg = scsi_sglist(cmd);
2856
2857
2858 dword_ptr = (u32 *)&pkt->dseg_0_address;
2859
2860
2861 for_each_sg(sg, s, seg_cnt, cnt) {
2862 if (cnt == 2)
2863 break;
2864
2865 dma_handle = sg_dma_address(s);
2866 *dword_ptr++ =
2867 cpu_to_le32(lower_32_bits(dma_handle));
2868 *dword_ptr++ =
2869 cpu_to_le32(upper_32_bits(dma_handle));
2870 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2871 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2872 cpu_to_le32(upper_32_bits(dma_handle)),
2873 cpu_to_le32(lower_32_bits(dma_handle)),
2874 cpu_to_le32(sg_dma_len(s)));
2875 remseg--;
2876 }
2877 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2878 "command packet data - b %i, t %i, l %i \n",
2879 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2880 SCSI_LUN_32(cmd));
2881 qla1280_dump_buffer(5, (char *)pkt,
2882 REQUEST_ENTRY_SIZE);
2883
2884
2885
2886
2887 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2888 "remains\n", seg_cnt);
2889
2890 while (remseg > 0) {
2891
2892 sg = s;
2893
2894 ha->req_ring_index++;
2895 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2896 ha->req_ring_index = 0;
2897 ha->request_ring_ptr =
2898 ha->request_ring;
2899 } else
2900 ha->request_ring_ptr++;
2901
2902 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2903
2904
2905 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2906
2907
2908 ((struct cont_a64_entry *) pkt)->entry_type =
2909 CONTINUE_A64_TYPE;
2910 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2911 ((struct cont_a64_entry *) pkt)->sys_define =
2912 (uint8_t)ha->req_ring_index;
2913
2914 dword_ptr =
2915 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2916
2917
2918 for_each_sg(sg, s, remseg, cnt) {
2919 if (cnt == 5)
2920 break;
2921 dma_handle = sg_dma_address(s);
2922 *dword_ptr++ =
2923 cpu_to_le32(lower_32_bits(dma_handle));
2924 *dword_ptr++ =
2925 cpu_to_le32(upper_32_bits(dma_handle));
2926 *dword_ptr++ =
2927 cpu_to_le32(sg_dma_len(s));
2928 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2929 cpu_to_le32(upper_32_bits(dma_handle)),
2930 cpu_to_le32(lower_32_bits(dma_handle)),
2931 cpu_to_le32(sg_dma_len(s)));
2932 }
2933 remseg -= cnt;
2934 dprintk(5, "qla1280_64bit_start_scsi: "
2935 "continuation packet data - b %i, t "
2936 "%i, l %i \n", SCSI_BUS_32(cmd),
2937 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2938 qla1280_dump_buffer(5, (char *)pkt,
2939 REQUEST_ENTRY_SIZE);
2940 }
2941 } else {
2942 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
2943 "packet data - b %i, t %i, l %i \n",
2944 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2945 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
2946 }
2947
2948 ha->req_ring_index++;
2949 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2950 ha->req_ring_index = 0;
2951 ha->request_ring_ptr = ha->request_ring;
2952 } else
2953 ha->request_ring_ptr++;
2954
2955
2956 dprintk(2,
2957 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
2958 sp->flags |= SRB_SENT;
2959 ha->actthreads++;
2960 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
2961
2962 out:
2963 if (status)
2964 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
2965 else
2966 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
2967
2968 return status;
2969 }
2970 #else
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
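/*
 * 32-bit variant of the command builder: a COMMAND_TYPE IOCB carries up to
 * four data segments, with seven more per CONTINUE_TYPE entry. Otherwise
 * identical to the 64-bit path: claim an outstanding_cmds slot, fill the
 * request-ring entry and write the new ring index to mailbox 4.
 */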
2991 static int
2992 qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2993 {
2994 struct device_reg __iomem *reg = ha->iobase;
2995 struct scsi_cmnd *cmd = sp->cmd;
2996 struct cmd_entry *pkt;
2997 __le32 *dword_ptr;
2998 int status = 0;
2999 int cnt;
3000 int req_cnt;
3001 int seg_cnt;
3002 u8 dir;
3003
3004 ENTER("qla1280_32bit_start_scsi");
3005
3006 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3007 cmd->cmnd[0]);
3008
3009
3010 req_cnt = 1;
3011 seg_cnt = scsi_dma_map(cmd);
3012 if (seg_cnt) {
3013
3014
3015
3016
3017 if (seg_cnt > 4) {
3018 req_cnt += (seg_cnt - 4) / 7;
3019 if ((seg_cnt - 4) % 7)
3020 req_cnt++;
3021 }
3022 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3023 cmd, seg_cnt, req_cnt);
3024 } else if (seg_cnt < 0) {
3025 status = 1;
3026 goto out;
3027 }
3028
3029 if ((req_cnt + 2) >= ha->req_q_cnt) {
3030
3031 cnt = RD_REG_WORD(&reg->mailbox4);
3032 if (ha->req_ring_index < cnt)
3033 ha->req_q_cnt = cnt - ha->req_ring_index;
3034 else
3035 ha->req_q_cnt =
3036 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3037 }
3038
3039 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3040 ha->req_q_cnt, seg_cnt);
3041
3042 if ((req_cnt + 2) >= ha->req_q_cnt) {
3043 status = SCSI_MLQUEUE_HOST_BUSY;
3044 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3045 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3046 ha->req_q_cnt, req_cnt);
3047 goto out;
3048 }
3049
3050
3051 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
3052 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3053
3054 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3055 status = SCSI_MLQUEUE_HOST_BUSY;
3056 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3057 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3058 goto out;
3059 }
3060
3061 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3062 ha->outstanding_cmds[cnt] = sp;
3063 ha->req_q_cnt -= req_cnt;
3064
3065
3066
3067
3068 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3069
3070 pkt->entry_type = COMMAND_TYPE;
3071 pkt->entry_count = (uint8_t) req_cnt;
3072 pkt->sys_define = (uint8_t) ha->req_ring_index;
3073 pkt->entry_status = 0;
3074 pkt->handle = cpu_to_le32(cnt);
3075
3076
3077 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3078
3079
3080 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3081
3082
3083 pkt->lun = SCSI_LUN_32(cmd);
3084 pkt->target = SCSI_BUS_32(cmd) ?
3085 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3086
3087
3088 if (cmd->device->simple_tags)
3089 pkt->control_flags |= cpu_to_le16(BIT_3);
3090
3091
3092 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3093 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3094
3095
3096
3097 dir = qla1280_data_direction(cmd);
3098 pkt->control_flags |= cpu_to_le16(dir);
3099
3100
3101 pkt->dseg_count = cpu_to_le16(seg_cnt);
3102
3103
3104
3105
3106 if (seg_cnt) {
3107 struct scatterlist *sg, *s;
3108 int remseg = seg_cnt;
3109
3110 sg = scsi_sglist(cmd);
3111
3112
3113 dword_ptr = &pkt->dseg_0_address;
3114
3115 dprintk(3, "Building S/G data segments..\n");
3116 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3117
3118
3119 for_each_sg(sg, s, seg_cnt, cnt) {
3120 if (cnt == 4)
3121 break;
3122 *dword_ptr++ =
3123 cpu_to_le32(lower_32_bits(sg_dma_address(s)));
3124 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3125 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3126 (lower_32_bits(sg_dma_address(s))),
3127 (sg_dma_len(s)));
3128 remseg--;
3129 }
3130
3131
3132
3133 dprintk(3, "S/G Building Continuation"
3134 "...seg_cnt=0x%x remains\n", seg_cnt);
3135 while (remseg > 0) {
3136
3137 sg = s;
3138
3139 ha->req_ring_index++;
3140 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3141 ha->req_ring_index = 0;
3142 ha->request_ring_ptr =
3143 ha->request_ring;
3144 } else
3145 ha->request_ring_ptr++;
3146
3147 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3148
3149
3150 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3151
3152
3153 ((struct cont_entry *) pkt)->
3154 entry_type = CONTINUE_TYPE;
3155 ((struct cont_entry *) pkt)->entry_count = 1;
3156
3157 ((struct cont_entry *) pkt)->sys_define =
3158 (uint8_t) ha->req_ring_index;
3159
3160
3161 dword_ptr =
3162 &((struct cont_entry *) pkt)->dseg_0_address;
3163
3164
3165 for_each_sg(sg, s, remseg, cnt) {
3166 if (cnt == 7)
3167 break;
3168 *dword_ptr++ =
3169 cpu_to_le32(lower_32_bits(sg_dma_address(s)));
3170 *dword_ptr++ =
3171 cpu_to_le32(sg_dma_len(s));
3172 dprintk(1,
3173 "S/G Segment Cont. phys_addr=0x%x, "
3174 "len=0x%x\n",
3175 cpu_to_le32(lower_32_bits(sg_dma_address(s))),
3176 cpu_to_le32(sg_dma_len(s)));
3177 }
3178 remseg -= cnt;
3179 dprintk(5, "qla1280_32bit_start_scsi: "
3180 "continuation packet data - "
3181 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3182 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3183 qla1280_dump_buffer(5, (char *)pkt,
3184 REQUEST_ENTRY_SIZE);
3185 }
3186 } else {
3187 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3188 "packet data - \n");
3189 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3190 }
3191 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3192 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3193 REQUEST_ENTRY_SIZE);
3194
3195
3196 ha->req_ring_index++;
3197 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3198 ha->req_ring_index = 0;
3199 ha->request_ring_ptr = ha->request_ring;
3200 } else
3201 ha->request_ring_ptr++;
3202
3203
3204 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3205 "for pending command\n");
3206 sp->flags |= SRB_SENT;
3207 ha->actthreads++;
3208 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3209
3210 out:
3211 if (status)
3212 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3213
3214 LEAVE("qla1280_32bit_start_scsi");
3215
3216 return status;
3217 }
3218 #endif
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
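/*
 * Obtain a free request-ring entry, polling the firmware (via mailbox 4 and
 * qla1280_poll) for up to roughly 30 seconds. The entry is zeroed and
 * returned with entry_count set to 1, or NULL on timeout.
 */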
3231 static request_t *
3232 qla1280_req_pkt(struct scsi_qla_host *ha)
3233 {
3234 struct device_reg __iomem *reg = ha->iobase;
3235 request_t *pkt = NULL;
3236 int cnt;
3237 uint32_t timer;
3238
3239 ENTER("qla1280_req_pkt");
3240
3241
3242
3243
3244
3245 for (timer = 15000000; timer; timer--) {
3246 if (ha->req_q_cnt > 0) {
3247
3248 cnt = RD_REG_WORD(&reg->mailbox4);
3249 if (ha->req_ring_index < cnt)
3250 ha->req_q_cnt = cnt - ha->req_ring_index;
3251 else
3252 ha->req_q_cnt =
3253 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3254 }
3255
3256
3257 if (ha->req_q_cnt > 0) {
3258 ha->req_q_cnt--;
3259 pkt = ha->request_ring_ptr;
3260
3261
3262 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3263
3264
3265
3266
3267
3268
3269 pkt->sys_define = (uint8_t) ha->req_ring_index;
3270
3271
3272 pkt->entry_count = 1;
3273
3274 break;
3275 }
3276
3277 udelay(2);
3278
3279
3280 qla1280_poll(ha);
3281 }
3282
3283 if (!pkt)
3284 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3285 else
3286 dprintk(3, "qla1280_req_pkt: exiting normally\n");
3287
3288 return pkt;
3289 }
3290
3291
3292
3293
3294
3295
3296
3297
3298
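/*
 * Advance the request-ring index past the just-built IOCB and notify the
 * ISP by writing the new index to mailbox register 4.
 */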
3299 static void
3300 qla1280_isp_cmd(struct scsi_qla_host *ha)
3301 {
3302 struct device_reg __iomem *reg = ha->iobase;
3303
3304 ENTER("qla1280_isp_cmd");
3305
3306 dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3307 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3308 REQUEST_ENTRY_SIZE);
3309
3310
3311 ha->req_ring_index++;
3312 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3313 ha->req_ring_index = 0;
3314 ha->request_ring_ptr = ha->request_ring;
3315 } else
3316 ha->request_ring_ptr++;
3317
3318
3319
3320
3321 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3322
3323 LEAVE("qla1280_isp_cmd");
3324 }
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
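/*
 * Interrupt service core: read the semaphore and mailbox registers, handle
 * asynchronous events and mailbox command completions, then walk the
 * response ring and move finished commands onto the caller's done queue.
 */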
3338 static void
3339 qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3340 {
3341 struct device_reg __iomem *reg = ha->iobase;
3342 struct response *pkt;
3343 struct srb *sp = NULL;
3344 uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3345 uint16_t *wptr;
3346 uint32_t index;
3347 u16 istatus;
3348
3349 ENTER("qla1280_isr");
3350
3351 istatus = RD_REG_WORD(&reg->istatus);
3352 if (!(istatus & (RISC_INT | PCI_INT)))
3353 return;
3354
3355
3356 mailbox[5] = RD_REG_WORD(&reg->mailbox5);
3357
3358
3359
3360 mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
3361
3362 if (mailbox[0] & BIT_0) {
3363
3364
3365
3366 wptr = &mailbox[0];
3367 *wptr++ = RD_REG_WORD(&reg->mailbox0);
3368 *wptr++ = RD_REG_WORD(&reg->mailbox1);
3369 *wptr = RD_REG_WORD(&reg->mailbox2);
3370 if (mailbox[0] != MBA_SCSI_COMPLETION) {
3371 wptr++;
3372 *wptr++ = RD_REG_WORD(&reg->mailbox3);
3373 *wptr++ = RD_REG_WORD(&reg->mailbox4);
3374 wptr++;
3375 *wptr++ = RD_REG_WORD(&reg->mailbox6);
3376 *wptr = RD_REG_WORD(&reg->mailbox7);
3377 }
3378
3379
3380
3381 WRT_REG_WORD(&reg->semaphore, 0);
3382 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3383
3384 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3385 mailbox[0]);
3386
3387
3388 switch (mailbox[0]) {
3389 case MBA_SCSI_COMPLETION:
3390 dprintk(5, "qla1280_isr: mailbox SCSI response "
3391 "completion\n");
3392
3393 if (ha->flags.online) {
3394
3395 index = mailbox[2] << 16 | mailbox[1];
3396
3397
3398 if (index < MAX_OUTSTANDING_COMMANDS)
3399 sp = ha->outstanding_cmds[index];
3400 else
3401 sp = NULL;
3402
3403 if (sp) {
3404
3405 ha->outstanding_cmds[index] = NULL;
3406
3407
3408 CMD_RESULT(sp->cmd) = 0;
3409 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3410
3411
3412 list_add_tail(&sp->list, done_q);
3413 } else {
3414
3415
3416
3417 printk(KERN_WARNING
3418 "qla1280: ISP invalid handle\n");
3419 }
3420 }
3421 break;
3422
3423 case MBA_BUS_RESET:
3424 ha->flags.reset_marker = 1;
3425 index = mailbox[6] & BIT_0;
3426 ha->bus_settings[index].reset_marker = 1;
3427
3428 printk(KERN_DEBUG "qla1280_isr(): index %i "
3429 "asynchronous BUS_RESET\n", index);
3430 break;
3431
3432 case MBA_SYSTEM_ERR:
3433 printk(KERN_WARNING
3434 "qla1280: ISP System Error - mbx1=%xh, mbx2="
3435 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3436 mailbox[3]);
3437 break;
3438
3439 case MBA_REQ_TRANSFER_ERR:
3440 printk(KERN_WARNING
3441 "qla1280: ISP Request Transfer Error\n");
3442 break;
3443
3444 case MBA_RSP_TRANSFER_ERR:
3445 printk(KERN_WARNING
3446 "qla1280: ISP Response Transfer Error\n");
3447 break;
3448
3449 case MBA_WAKEUP_THRES:
3450 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3451 break;
3452
3453 case MBA_TIMEOUT_RESET:
3454 dprintk(2,
3455 "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3456 break;
3457
3458 case MBA_DEVICE_RESET:
3459 printk(KERN_INFO "qla1280_isr(): asynchronous "
3460 "BUS_DEVICE_RESET\n");
3461
3462 ha->flags.reset_marker = 1;
3463 index = mailbox[6] & BIT_0;
3464 ha->bus_settings[index].reset_marker = 1;
3465 break;
3466
3467 case MBA_BUS_MODE_CHANGE:
3468 dprintk(2,
3469 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3470 break;
3471
3472 default:
3473
3474 if (mailbox[0] < MBA_ASYNC_EVENT) {
3475 wptr = &mailbox[0];
3476 memcpy((uint16_t *) ha->mailbox_out, wptr,
3477 MAILBOX_REGISTER_COUNT *
3478 sizeof(uint16_t));
3479
3480 if(ha->mailbox_wait != NULL)
3481 complete(ha->mailbox_wait);
3482 }
3483 break;
3484 }
3485 } else {
3486 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3487 }
3488
3489
3490
3491
3492
3493 if (!(ha->flags.online && !ha->mailbox_wait)) {
3494 dprintk(2, "qla1280_isr: Response pointer Error\n");
3495 goto out;
3496 }
3497
3498 if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3499 goto out;
3500
3501 while (ha->rsp_ring_index != mailbox[5]) {
3502 pkt = ha->response_ring_ptr;
3503
3504 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3505 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3506 dprintk(5,"qla1280_isr: response packet data\n");
3507 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3508
3509 if (pkt->entry_type == STATUS_TYPE) {
3510 if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3511 || pkt->comp_status || pkt->entry_status) {
3512 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3513 "0x%x mailbox[5] = 0x%x, comp_status "
3514 "= 0x%x, scsi_status = 0x%x\n",
3515 ha->rsp_ring_index, mailbox[5],
3516 le16_to_cpu(pkt->comp_status),
3517 le16_to_cpu(pkt->scsi_status));
3518 }
3519 } else {
3520 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3521 "0x%x, mailbox[5] = 0x%x\n",
3522 ha->rsp_ring_index, mailbox[5]);
3523 dprintk(2, "qla1280_isr: response packet data\n");
3524 qla1280_dump_buffer(2, (char *)pkt,
3525 RESPONSE_ENTRY_SIZE);
3526 }
3527
3528 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3529 dprintk(2, "status: Cmd %p, handle %i\n",
3530 ha->outstanding_cmds[pkt->handle]->cmd,
3531 pkt->handle);
3532 if (pkt->entry_type == STATUS_TYPE)
3533 qla1280_status_entry(ha, pkt, done_q);
3534 else
3535 qla1280_error_entry(ha, pkt, done_q);
3536
3537 ha->rsp_ring_index++;
3538 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3539 ha->rsp_ring_index = 0;
3540 ha->response_ring_ptr = ha->response_ring;
3541 } else
3542 ha->response_ring_ptr++;
3543 WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
3544 }
3545 }
3546
3547 out:
3548 LEAVE("qla1280_isr");
3549 }
3550
3551
3552
3553
3554
3555
3556
3557
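/*
 * Process the reset asynchronous event: while reset markers are pending,
 * send a MK_SYNC_ALL marker for every bus that requested one.
 */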
3558 static void
3559 qla1280_rst_aen(struct scsi_qla_host *ha)
3560 {
3561 uint8_t bus;
3562
3563 ENTER("qla1280_rst_aen");
3564
3565 if (ha->flags.online && !ha->flags.reset_active &&
3566 !ha->flags.abort_isp_active) {
3567 ha->flags.reset_active = 1;
3568 while (ha->flags.reset_marker) {
3569
3570 ha->flags.reset_marker = 0;
3571 for (bus = 0; bus < ha->ports &&
3572 !ha->flags.reset_marker; bus++) {
3573 if (ha->bus_settings[bus].reset_marker) {
3574 ha->bus_settings[bus].reset_marker = 0;
3575 qla1280_marker(ha, bus, 0, 0,
3576 MK_SYNC_ALL);
3577 }
3578 }
3579 }
3580 }
3581
3582 LEAVE("qla1280_rst_aen");
3583 }
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
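/*
 * Handle a STATUS_TYPE response IOCB: map the handle back to its SRB,
 * translate the completion and SCSI status (TASK SET FULL / BUSY are passed
 * through unchanged), copy any request-sense data, and queue the command
 * for completion.
 */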
3595 static void
3596 qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3597 struct list_head *done_q)
3598 {
3599 unsigned int bus, target, lun;
3600 int sense_sz;
3601 struct srb *sp;
3602 struct scsi_cmnd *cmd;
3603 uint32_t handle = le32_to_cpu(pkt->handle);
3604 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3605 uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3606
3607 ENTER("qla1280_status_entry");
3608
3609
3610 if (handle < MAX_OUTSTANDING_COMMANDS)
3611 sp = ha->outstanding_cmds[handle];
3612 else
3613 sp = NULL;
3614
3615 if (!sp) {
3616 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3617 goto out;
3618 }
3619
3620
3621 ha->outstanding_cmds[handle] = NULL;
3622
3623 cmd = sp->cmd;
3624
3625
3626 bus = SCSI_BUS_32(cmd);
3627 target = SCSI_TCN_32(cmd);
3628 lun = SCSI_LUN_32(cmd);
3629
3630 if (comp_status || scsi_status) {
3631 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3632 "0x%x, handle = 0x%x\n", comp_status,
3633 scsi_status, handle);
3634 }
3635
3636
3637 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3638 (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3639 CMD_RESULT(cmd) = scsi_status & 0xff;
3640 } else {
3641
3642
3643 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3644
3645 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3646 if (comp_status != CS_ARS_FAILED) {
3647 uint16_t req_sense_length =
3648 le16_to_cpu(pkt->req_sense_length);
3649 if (req_sense_length < CMD_SNSLEN(cmd))
3650 sense_sz = req_sense_length;
3651 else
3652
3653
3654
3655
3656
3657 sense_sz = CMD_SNSLEN(cmd) - 1;
3658
3659 memcpy(cmd->sense_buffer,
3660 &pkt->req_sense_data, sense_sz);
3661 } else
3662 sense_sz = 0;
3663 memset(cmd->sense_buffer + sense_sz, 0,
3664 SCSI_SENSE_BUFFERSIZE - sense_sz);
3665
3666 dprintk(2, "qla1280_status_entry: Check "
3667 "condition Sense data, b %i, t %i, "
3668 "l %i\n", bus, target, lun);
3669 if (sense_sz)
3670 qla1280_dump_buffer(2,
3671 (char *)cmd->sense_buffer,
3672 sense_sz);
3673 }
3674 }
3675
3676 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3677
3678
3679 list_add_tail(&sp->list, done_q);
3680 out:
3681 LEAVE("qla1280_status_entry");
3682 }
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
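/*
 * Handle a response entry the firmware flagged as bad; the owning command,
 * if one can be found from the handle, is completed with DID_ERROR or
 * DID_BUS_BUSY depending on the entry_status bits.
 */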
3693 static void
3694 qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3695 struct list_head *done_q)
3696 {
3697 struct srb *sp;
3698 uint32_t handle = le32_to_cpu(pkt->handle);
3699
3700 ENTER("qla1280_error_entry");
3701
3702 if (pkt->entry_status & BIT_3)
3703 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3704 else if (pkt->entry_status & BIT_2)
3705 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3706 else if (pkt->entry_status & BIT_1)
3707 dprintk(2, "qla1280_error_entry: FULL flag error\n");
3708 else
3709 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3710
3711
3712 if (handle < MAX_OUTSTANDING_COMMANDS)
3713 sp = ha->outstanding_cmds[handle];
3714 else
3715 sp = NULL;
3716
3717 if (sp) {
3718
3719 ha->outstanding_cmds[handle] = NULL;
3720
3721
3722 if (pkt->entry_status & (BIT_3 + BIT_2)) {
3723
3724
3725 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3726 } else if (pkt->entry_status & BIT_1) {
3727 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3728 } else {
3729
3730 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3731 }
3732
3733 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3734
3735
3736 list_add_tail(&sp->list, done_q);
3737 }
3738 #ifdef QLA_64BIT_PTR
3739 else if (pkt->entry_type == COMMAND_A64_TYPE) {
3740 printk(KERN_WARNING "!qla1280: Error Entry invalid handle");
3741 }
3742 #endif
3743
3744 LEAVE("qla1280_error_entry");
3745 }
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
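/*
 * ISP error recovery: pause the RISC, fail every outstanding command back
 * with DID_RESET, reload the firmware, re-apply the NVRAM configuration,
 * reinitialize the rings and reset each SCSI bus. On failure the board is
 * disabled via qla1280_reset_adapter().
 */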
3757 static int
3758 qla1280_abort_isp(struct scsi_qla_host *ha)
3759 {
3760 struct device_reg __iomem *reg = ha->iobase;
3761 struct srb *sp;
3762 int status = 0;
3763 int cnt;
3764 int bus;
3765
3766 ENTER("qla1280_abort_isp");
3767
3768 if (ha->flags.abort_isp_active || !ha->flags.online)
3769 goto out;
3770
3771 ha->flags.abort_isp_active = 1;
3772
3773
3774 qla1280_disable_intrs(ha);
3775 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3776 RD_REG_WORD(&reg->id_l);
3777
3778 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3779 ha->host_no);
3780
3781 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3782 struct scsi_cmnd *cmd;
3783 sp = ha->outstanding_cmds[cnt];
3784 if (sp) {
3785 cmd = sp->cmd;
3786 CMD_RESULT(cmd) = DID_RESET << 16;
3787 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3788 ha->outstanding_cmds[cnt] = NULL;
3789 list_add_tail(&sp->list, &ha->done_q);
3790 }
3791 }
3792
3793 qla1280_done(ha);
3794
3795 status = qla1280_load_firmware(ha);
3796 if (status)
3797 goto out;
3798
3799
3800 qla1280_nvram_config (ha);
3801
3802 status = qla1280_init_rings(ha);
3803 if (status)
3804 goto out;
3805
3806
3807 for (bus = 0; bus < ha->ports; bus++)
3808 qla1280_bus_reset(ha, bus);
3809
3810 ha->flags.abort_isp_active = 0;
3811 out:
3812 if (status) {
3813 printk(KERN_WARNING
3814 "qla1280: ISP error recovery failed, board disabled");
3815 qla1280_reset_adapter(ha);
3816 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3817 }
3818
3819 LEAVE("qla1280_abort_isp");
3820 return status;
3821 }
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
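/*
 * Read a possibly unstable 16-bit register repeatedly until two
 * consecutive reads return the same value.
 */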
3834 static u16
3835 qla1280_debounce_register(volatile u16 __iomem * addr)
3836 {
3837 volatile u16 ret;
3838 volatile u16 ret2;
3839
3840 ret = RD_REG_WORD(addr);
3841 ret2 = RD_REG_WORD(addr);
3842
3843 if (ret == ret2)
3844 return ret;
3845
3846 do {
3847 cpu_relax();
3848 ret = RD_REG_WORD(addr);
3849 ret2 = RD_REG_WORD(addr);
3850 } while (ret != ret2);
3851
3852 return ret;
3853 }
3854
3855
3856
3857
3858
3859
3860
3861 #define SET_SXP_BANK 0x0100
3862 #define SCSI_PHASE_INVALID 0x87FF
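/*
 * If the bus was previously marked dead, peek at the SXP SCSI control pins
 * with the RISC paused. An invalid phase (SCSI_PHASE_INVALID) means the bus
 * is still dead (return 1); otherwise clear the dead/failed-reset state.
 */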
3863 static int
3864 qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3865 {
3866 uint16_t config_reg, scsi_control;
3867 struct device_reg __iomem *reg = ha->iobase;
3868
3869 if (ha->bus_settings[bus].scsi_bus_dead) {
3870 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3871 config_reg = RD_REG_WORD(&reg->cfg_1);
3872 WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
3873 scsi_control = RD_REG_WORD(&reg->scsiControlPins);
3874 WRT_REG_WORD(&reg->cfg_1, config_reg);
3875 WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3876
3877 if (scsi_control == SCSI_PHASE_INVALID) {
3878 ha->bus_settings[bus].scsi_bus_dead = 1;
3879 return 1;
3880 } else {
3881 ha->bus_settings[bus].scsi_bus_dead = 0;
3882 ha->bus_settings[bus].failed_reset_count = 0;
3883 }
3884 }
3885 return 0;
3886 }
3887
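/*
 * Query the firmware (MBC_GET_TARGET_PARAMETERS) for the negotiated
 * settings of one device and print the sync period/offset, wide, DT and
 * tagged-queueing information.
 */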
3888 static void
3889 qla1280_get_target_parameters(struct scsi_qla_host *ha,
3890 struct scsi_device *device)
3891 {
3892 uint16_t mb[MAILBOX_REGISTER_COUNT];
3893 int bus, target, lun;
3894
3895 bus = device->channel;
3896 target = device->id;
3897 lun = device->lun;
3898
3899
3900 mb[0] = MBC_GET_TARGET_PARAMETERS;
3901 mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3902 mb[1] <<= 8;
3903 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3904 &mb[0]);
3905
3906 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3907
3908 if (mb[3] != 0) {
3909 printk(" Sync: period %d, offset %d",
3910 (mb[3] & 0xff), (mb[3] >> 8));
3911 if (mb[2] & BIT_13)
3912 printk(", Wide");
3913 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
3914 printk(", DT");
3915 } else
3916 printk(" Async");
3917
3918 if (device->simple_tags)
3919 printk(", Tagged queuing: depth %d", device->queue_depth);
3920 printk("\n");
3921 }
3922
3923
3924 #if DEBUG_QLA1280
3925 static void
3926 __qla1280_dump_buffer(char *b, int size)
3927 {
3928 int cnt;
3929 u8 c;
3930
3931 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
3932 "Bh Ch Dh Eh Fh\n");
3933 printk(KERN_DEBUG "---------------------------------------------"
3934 "------------------\n");
3935
3936 for (cnt = 0; cnt < size;) {
3937 c = *b++;
3938
3939 printk("0x%02x", c);
3940 cnt++;
3941 if (!(cnt % 16))
3942 printk("\n");
3943 else
3944 printk(" ");
3945 }
3946 if (cnt % 16)
3947 printk("\n");
3948 }
3949
3950
3951
3952
3953
3954 static void
3955 __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
3956 {
3957 struct scsi_qla_host *ha;
3958 struct Scsi_Host *host = CMD_HOST(cmd);
3959 struct srb *sp;
3960
3961
3962 int i;
3963 ha = (struct scsi_qla_host *)host->hostdata;
3964
3965 sp = (struct srb *)CMD_SP(cmd);
3966 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
3967 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
3968 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
3969 CMD_CDBLEN(cmd));
3970 printk(" CDB = ");
3971 for (i = 0; i < cmd->cmd_len; i++) {
3972 printk("0x%02x ", cmd->cmnd[i]);
3973 }
3974 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
3975 printk(" request buffer=0x%p, request buffer len=0x%x\n",
3976 scsi_sglist(cmd), scsi_bufflen(cmd));
3977
3978
3979
3980
3981
3982
3983 printk(" tag=%d, transfersize=0x%x \n",
3984 cmd->tag, cmd->transfersize);
3985 printk(" SP=0x%p\n", CMD_SP(cmd));
3986 printk(" underflow size = 0x%x, direction=0x%x\n",
3987 cmd->underflow, cmd->sc_data_direction);
3988 }
3989
3990
3991
3992
3993
3994 static void
3995 ql1280_dump_device(struct scsi_qla_host *ha)
3996 {
3997
3998 struct scsi_cmnd *cp;
3999 struct srb *sp;
4000 int i;
4001
4002 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4003
4004 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4005 if ((sp = ha->outstanding_cmds[i]) == NULL)
4006 continue;
4007 if ((cp = sp->cmd) == NULL)
4008 continue;
4009 qla1280_print_scsi_cmd(1, cp);
4010 }
4011 }
4012 #endif
4013
4014
4015 enum tokens {
4016 TOKEN_NVRAM,
4017 TOKEN_SYNC,
4018 TOKEN_WIDE,
4019 TOKEN_PPR,
4020 TOKEN_VERBOSE,
4021 TOKEN_DEBUG,
4022 };
4023
4024 struct setup_tokens {
4025 char *token;
4026 int val;
4027 };
4028
4029 static struct setup_tokens setup_token[] __initdata =
4030 {
4031 { "nvram", TOKEN_NVRAM },
4032 { "sync", TOKEN_SYNC },
4033 { "wide", TOKEN_WIDE },
4034 { "ppr", TOKEN_PPR },
4035 { "verbose", TOKEN_VERBOSE },
4036 { "debug", TOKEN_DEBUG },
4037 };
4038
4039
4040
4041
4042
4043
4044
4045
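/*
 * Parse the qla1280 boot/module option string: a semicolon-separated list
 * of token:value pairs (nvram, sync, wide, ppr, verbose), where "yes" and
 * "no" are accepted in place of numeric values.
 */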
4046 static int __init
4047 qla1280_setup(char *s)
4048 {
4049 char *cp, *ptr;
4050 unsigned long val;
4051 int toke;
4052
4053 cp = s;
4054
4055 while (cp && (ptr = strchr(cp, ':'))) {
4056 ptr++;
4057 if (!strcmp(ptr, "yes")) {
4058 val = 0x10000;
4059 ptr += 3;
4060 } else if (!strcmp(ptr, "no")) {
4061 val = 0;
4062 ptr += 2;
4063 } else
4064 val = simple_strtoul(ptr, &ptr, 0);
4065
4066 switch ((toke = qla1280_get_token(cp))) {
4067 case TOKEN_NVRAM:
4068 if (!val)
4069 driver_setup.no_nvram = 1;
4070 break;
4071 case TOKEN_SYNC:
4072 if (!val)
4073 driver_setup.no_sync = 1;
4074 else if (val != 0x10000)
4075 driver_setup.sync_mask = val;
4076 break;
4077 case TOKEN_WIDE:
4078 if (!val)
4079 driver_setup.no_wide = 1;
4080 else if (val != 0x10000)
4081 driver_setup.wide_mask = val;
4082 break;
4083 case TOKEN_PPR:
4084 if (!val)
4085 driver_setup.no_ppr = 1;
4086 else if (val != 0x10000)
4087 driver_setup.ppr_mask = val;
4088 break;
4089 case TOKEN_VERBOSE:
4090 qla1280_verbose = val;
4091 break;
4092 default:
4093 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4094 cp);
4095 }
4096
4097 cp = strchr(ptr, ';');
4098 if (cp)
4099 cp++;
4100 else {
4101 break;
4102 }
4103 }
4104 return 1;
4105 }
4106
4107
4108 static int __init
4109 qla1280_get_token(char *str)
4110 {
4111 char *sep;
4112 long ret = -1;
4113 int i;
4114
4115 sep = strchr(str, ':');
4116
4117 if (sep) {
4118 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4119 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4120 ret = setup_token[i].val;
4121 break;
4122 }
4123 }
4124 }
4125
4126 return ret;
4127 }
4128
4129
4130 static struct scsi_host_template qla1280_driver_template = {
4131 .module = THIS_MODULE,
4132 .proc_name = "qla1280",
4133 .name = "Qlogic ISP 1280/12160",
4134 .info = qla1280_info,
4135 .slave_configure = qla1280_slave_configure,
4136 .queuecommand = qla1280_queuecommand,
4137 .eh_abort_handler = qla1280_eh_abort,
4138 .eh_device_reset_handler= qla1280_eh_device_reset,
4139 .eh_bus_reset_handler = qla1280_eh_bus_reset,
4140 .eh_host_reset_handler = qla1280_eh_adapter_reset,
4141 .bios_param = qla1280_biosparam,
4142 .can_queue = MAX_OUTSTANDING_COMMANDS,
4143 .this_id = -1,
4144 .sg_tablesize = SG_ALL,
4145 };
4146
4147
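/*
 * PCI probe: enable the device, set a suitable DMA mask, allocate the
 * request and response rings, map the register space, hook the interrupt,
 * initialize the adapter and register the Scsi_Host.
 */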
4148 static int
4149 qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4150 {
4151 int devnum = id->driver_data;
4152 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4153 struct Scsi_Host *host;
4154 struct scsi_qla_host *ha;
4155 int error = -ENODEV;
4156
4157
4158 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4159 printk(KERN_INFO
4160 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4161 goto error;
4162 }
4163
4164 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4165 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4166
4167 if (pci_enable_device(pdev)) {
4168 printk(KERN_WARNING
4169 "qla1280: Failed to enabled pci device, aborting.\n");
4170 goto error;
4171 }
4172
4173 pci_set_master(pdev);
4174
4175 error = -ENOMEM;
4176 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4177 if (!host) {
4178 printk(KERN_WARNING
4179 "qla1280: Failed to register host, aborting.\n");
4180 goto error_disable_device;
4181 }
4182
4183 ha = (struct scsi_qla_host *)host->hostdata;
4184 memset(ha, 0, sizeof(struct scsi_qla_host));
4185
4186 ha->pdev = pdev;
4187 ha->devnum = devnum;
4188
4189 #ifdef QLA_64BIT_PTR
4190 if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
4191 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
4192 printk(KERN_WARNING "scsi(%li): Unable to set a "
4193 "suitable DMA mask - aborting\n", ha->host_no);
4194 error = -ENODEV;
4195 goto error_put_host;
4196 }
4197 } else
4198 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4199 ha->host_no);
4200 #else
4201 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
4202 printk(KERN_WARNING "scsi(%li): Unable to set a "
4203 "suitable DMA mask - aborting\n", ha->host_no);
4204 error = -ENODEV;
4205 goto error_put_host;
4206 }
4207 #endif
4208
4209 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
4210 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4211 &ha->request_dma, GFP_KERNEL);
4212 if (!ha->request_ring) {
4213 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4214 goto error_put_host;
4215 }
4216
4217 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
4218 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4219 &ha->response_dma, GFP_KERNEL);
4220 if (!ha->response_ring) {
4221 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4222 goto error_free_request_ring;
4223 }
4224
4225 ha->ports = bdp->numPorts;
4226
4227 ha->host = host;
4228 ha->host_no = host->host_no;
4229
4230 host->irq = pdev->irq;
4231 host->max_channel = bdp->numPorts - 1;
4232 host->max_lun = MAX_LUNS - 1;
4233 host->max_id = MAX_TARGETS;
4234 host->max_sectors = 1024;
4235 host->unique_id = host->host_no;
4236
4237 error = -ENODEV;
4238
4239 #if MEMORY_MAPPED_IO
4240 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4241 if (!ha->mmpbase) {
4242 printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4243 goto error_free_response_ring;
4244 }
4245
4246 host->base = (unsigned long)ha->mmpbase;
4247 ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
4248 #else
4249 host->io_port = pci_resource_start(ha->pdev, 0);
4250 if (!request_region(host->io_port, 0xff, "qla1280")) {
4251 printk(KERN_INFO "qla1280: Failed to reserve i/o region "
4252 "0x%04lx-0x%04lx - already in use\n",
4253 host->io_port, host->io_port + 0xff);
4254 goto error_free_response_ring;
4255 }
4256
4257 ha->iobase = (struct device_reg *)host->io_port;
4258 #endif
4259
4260 INIT_LIST_HEAD(&ha->done_q);
4261
4262
4263 qla1280_disable_intrs(ha);
4264
4265 if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4266 "qla1280", ha)) {
4267 printk("qla1280 : Failed to reserve interrupt %d already "
4268 "in use\n", pdev->irq);
4269 goto error_release_region;
4270 }
4271
4272
4273 if (qla1280_initialize_adapter(ha)) {
4274 printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
4275 goto error_free_irq;
4276 }
4277
4278
4279 host->this_id = ha->bus_settings[0].id;
4280
4281 pci_set_drvdata(pdev, host);
4282
4283 error = scsi_add_host(host, &pdev->dev);
4284 if (error)
4285 goto error_disable_adapter;
4286 scsi_scan_host(host);
4287
4288 return 0;
4289
4290 error_disable_adapter:
4291 qla1280_disable_intrs(ha);
4292 error_free_irq:
4293 free_irq(pdev->irq, ha);
4294 error_release_region:
4295 #if MEMORY_MAPPED_IO
4296 iounmap(ha->mmpbase);
4297 #else
4298 release_region(host->io_port, 0xff);
4299 #endif
4300 error_free_response_ring:
4301 dma_free_coherent(&ha->pdev->dev,
4302 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4303 ha->response_ring, ha->response_dma);
4304 error_free_request_ring:
4305 dma_free_coherent(&ha->pdev->dev,
4306 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4307 ha->request_ring, ha->request_dma);
4308 error_put_host:
4309 scsi_host_put(host);
4310 error_disable_device:
4311 pci_disable_device(pdev);
4312 error:
4313 return error;
4314 }
4315
4316
4317 static void
4318 qla1280_remove_one(struct pci_dev *pdev)
4319 {
4320 struct Scsi_Host *host = pci_get_drvdata(pdev);
4321 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
4322
4323 scsi_remove_host(host);
4324
4325 qla1280_disable_intrs(ha);
4326
4327 free_irq(pdev->irq, ha);
4328
4329 #if MEMORY_MAPPED_IO
4330 iounmap(ha->mmpbase);
4331 #else
4332 release_region(host->io_port, 0xff);
4333 #endif
4334
4335 dma_free_coherent(&ha->pdev->dev,
4336 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
4337 ha->request_ring, ha->request_dma);
4338 dma_free_coherent(&ha->pdev->dev,
4339 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
4340 ha->response_ring, ha->response_dma);
4341
4342 pci_disable_device(pdev);
4343
4344 scsi_host_put(host);
4345 }
4346
4347 static struct pci_driver qla1280_pci_driver = {
4348 .name = "qla1280",
4349 .id_table = qla1280_pci_tbl,
4350 .probe = qla1280_probe_one,
4351 .remove = qla1280_remove_one,
4352 };
4353
4354 static int __init
4355 qla1280_init(void)
4356 {
4357 if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
4358 printk(KERN_WARNING
4359 "qla1280: struct srb too big, aborting\n");
4360 return -EINVAL;
4361 }
4362
4363 #ifdef MODULE
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376 if (qla1280)
4377 qla1280_setup(qla1280);
4378 #endif
4379
4380 return pci_register_driver(&qla1280_pci_driver);
4381 }
4382
4383 static void __exit
4384 qla1280_exit(void)
4385 {
4386 int i;
4387
4388 pci_unregister_driver(&qla1280_pci_driver);
4389
4390 for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
4391 release_firmware(qla1280_fw_tbl[i].fw);
4392 qla1280_fw_tbl[i].fw = NULL;
4393 }
4394 }
4395
4396 module_init(qla1280_init);
4397 module_exit(qla1280_exit);
4398
4399 MODULE_AUTHOR("Qlogic & Jes Sorensen");
4400 MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
4401 MODULE_LICENSE("GPL");
4402 MODULE_FIRMWARE("qlogic/1040.bin");
4403 MODULE_FIRMWARE("qlogic/1280.bin");
4404 MODULE_FIRMWARE("qlogic/12160.bin");
4405 MODULE_VERSION(QLA1280_VERSION);
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417