This source file includes the following definitions:
- sym_build_sge
- sym_m_get_dma_mem_cluster
- sym_m_free_dma_mem_cluster
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27 #include <linux/gfp.h>
28
29 #ifndef SYM_HIPD_H
30 #define SYM_HIPD_H
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47 #if 0
48 #define SYM_OPT_HANDLE_DEVICE_QUEUEING
49 #define SYM_OPT_LIMIT_COMMAND_REORDERING
50 #endif
51
52
53
54
55
56
57 #define DEBUG_ALLOC (0x0001)
58 #define DEBUG_PHASE (0x0002)
59 #define DEBUG_POLL (0x0004)
60 #define DEBUG_QUEUE (0x0008)
61 #define DEBUG_RESULT (0x0010)
62 #define DEBUG_SCATTER (0x0020)
63 #define DEBUG_SCRIPT (0x0040)
64 #define DEBUG_TINY (0x0080)
65 #define DEBUG_TIMING (0x0100)
66 #define DEBUG_NEGO (0x0200)
67 #define DEBUG_TAGS (0x0400)
68 #define DEBUG_POINTER (0x0800)
69
70 #ifndef DEBUG_FLAGS
71 #define DEBUG_FLAGS (0x0000)
72 #endif
73
74 #ifndef sym_verbose
75 #define sym_verbose (np->verbose)
76 #endif
77
78
79
80
/*
 * Panic-on-failure assertion for driver invariants.
 * The body is wrapped in do { } while (0) so that
 * "if (x) assert(y); else ..." parses as intended; the previous
 * bare-block form left a dangling ';' that broke such uses.
 */
#ifndef assert
#define assert(expression) do { \
	if (!(expression)) { \
		(void)panic( \
			"assertion \"%s\" failed: file \"%s\", line %d\n", \
			#expression, \
			__FILE__, __LINE__); \
	} \
} while (0)
#endif
91
92
93
94
95 #if SYM_CONF_MAX_TAG_ORDER > 8
96 #error "more than 256 tags per logical unit not allowed."
97 #endif
98 #define SYM_CONF_MAX_TASK (1<<SYM_CONF_MAX_TAG_ORDER)
99
100
101
102
103 #ifndef SYM_CONF_MAX_TAG
104 #define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
105 #endif
106 #if SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
107 #undef SYM_CONF_MAX_TAG
108 #define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
109 #endif
110
111
112
113
114 #define NO_TAG (256)
115
116
117
118
119 #if SYM_CONF_MAX_TARGET > 16
120 #error "more than 16 targets not allowed."
121 #endif
122
123
124
125
126 #if SYM_CONF_MAX_LUN > 64
127 #error "more than 64 logical units per target not allowed."
128 #endif
129
130
131
132
133
134 #define SYM_CONF_MIN_ASYNC (40)
135
136
137
138
139
140
141 #define SYM_MEM_WARN 1
142
143 #define SYM_MEM_PAGE_ORDER 0
144 #define SYM_MEM_CLUSTER_SHIFT (PAGE_SHIFT+SYM_MEM_PAGE_ORDER)
145 #define SYM_MEM_FREE_UNUSED
146
147
148
149
150
151 #define SYM_MEM_SHIFT 4
152 #define SYM_MEM_CLUSTER_SIZE (1UL << SYM_MEM_CLUSTER_SHIFT)
153 #define SYM_MEM_CLUSTER_MASK (SYM_MEM_CLUSTER_SIZE-1)
154
155
156
157
158
159
160
161 #ifdef SYM_CONF_MAX_START
162 #define SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
163 #else
164 #define SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
165 #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
166 #endif
167
168 #if SYM_CONF_MAX_QUEUE > SYM_MEM_CLUSTER_SIZE/8
169 #undef SYM_CONF_MAX_QUEUE
170 #define SYM_CONF_MAX_QUEUE (SYM_MEM_CLUSTER_SIZE/8)
171 #undef SYM_CONF_MAX_START
172 #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
173 #endif
174
175
176
177
178 #define MAX_QUEUE SYM_CONF_MAX_QUEUE
179
180
181
182
183
184 #define INB_OFF(np, o) ioread8(np->s.ioaddr + (o))
185 #define INW_OFF(np, o) ioread16(np->s.ioaddr + (o))
186 #define INL_OFF(np, o) ioread32(np->s.ioaddr + (o))
187
188 #define OUTB_OFF(np, o, val) iowrite8((val), np->s.ioaddr + (o))
189 #define OUTW_OFF(np, o, val) iowrite16((val), np->s.ioaddr + (o))
190 #define OUTL_OFF(np, o, val) iowrite32((val), np->s.ioaddr + (o))
191
192 #define INB(np, r) INB_OFF(np, offsetof(struct sym_reg, r))
193 #define INW(np, r) INW_OFF(np, offsetof(struct sym_reg, r))
194 #define INL(np, r) INL_OFF(np, offsetof(struct sym_reg, r))
195
196 #define OUTB(np, r, v) OUTB_OFF(np, offsetof(struct sym_reg, r), (v))
197 #define OUTW(np, r, v) OUTW_OFF(np, offsetof(struct sym_reg, r), (v))
198 #define OUTL(np, r, v) OUTL_OFF(np, offsetof(struct sym_reg, r), (v))
199
200 #define OUTONB(np, r, m) OUTB(np, r, INB(np, r) | (m))
201 #define OUTOFFB(np, r, m) OUTB(np, r, INB(np, r) & ~(m))
202 #define OUTONW(np, r, m) OUTW(np, r, INW(np, r) | (m))
203 #define OUTOFFW(np, r, m) OUTW(np, r, INW(np, r) & ~(m))
204 #define OUTONL(np, r, m) OUTL(np, r, INL(np, r) | (m))
205 #define OUTOFFL(np, r, m) OUTL(np, r, INL(np, r) & ~(m))
206
207
208
209
210
211
212 #define OUTL_DSP(np, v) \
213 do { \
214 MEMORY_WRITE_BARRIER(); \
215 OUTL(np, nc_dsp, (v)); \
216 } while (0)
217
218 #define OUTONB_STD() \
219 do { \
220 MEMORY_WRITE_BARRIER(); \
221 OUTONB(np, nc_dcntl, (STD|NOCOM)); \
222 } while (0)
223
224
225
226
227 #define HS_IDLE (0)
228 #define HS_BUSY (1)
229 #define HS_NEGOTIATE (2)
230 #define HS_DISCONNECT (3)
231 #define HS_WAIT (4)
232
233 #define HS_DONEMASK (0x80)
234 #define HS_COMPLETE (4|HS_DONEMASK)
235 #define HS_SEL_TIMEOUT (5|HS_DONEMASK)
236 #define HS_UNEXPECTED (6|HS_DONEMASK)
237 #define HS_COMP_ERR (7|HS_DONEMASK)
238
239
240
241
242 #define SIR_BAD_SCSI_STATUS (1)
243 #define SIR_SEL_ATN_NO_MSG_OUT (2)
244 #define SIR_MSG_RECEIVED (3)
245 #define SIR_MSG_WEIRD (4)
246 #define SIR_NEGO_FAILED (5)
247 #define SIR_NEGO_PROTO (6)
248 #define SIR_SCRIPT_STOPPED (7)
249 #define SIR_REJECT_TO_SEND (8)
250 #define SIR_SWIDE_OVERRUN (9)
251 #define SIR_SODL_UNDERRUN (10)
252 #define SIR_RESEL_NO_MSG_IN (11)
253 #define SIR_RESEL_NO_IDENTIFY (12)
254 #define SIR_RESEL_BAD_LUN (13)
255 #define SIR_TARGET_SELECTED (14)
256 #define SIR_RESEL_BAD_I_T_L (15)
257 #define SIR_RESEL_BAD_I_T_L_Q (16)
258 #define SIR_ABORT_SENT (17)
259 #define SIR_RESEL_ABORTED (18)
260 #define SIR_MSG_OUT_DONE (19)
261 #define SIR_COMPLETE_ERROR (20)
262 #define SIR_DATA_OVERRUN (21)
263 #define SIR_BAD_PHASE (22)
264 #if SYM_CONF_DMA_ADDRESSING_MODE == 2
265 #define SIR_DMAP_DIRTY (23)
266 #define SIR_MAX (23)
267 #else
268 #define SIR_MAX (22)
269 #endif
270
271
272
273
274
275 #define XE_EXTRA_DATA (1)
276 #define XE_BAD_PHASE (1<<1)
277 #define XE_PARITY_ERR (1<<2)
278 #define XE_SODL_UNRUN (1<<3)
279 #define XE_SWIDE_OVRUN (1<<4)
280
281
282
283
284
285 #define NS_SYNC (1)
286 #define NS_WIDE (2)
287 #define NS_PPR (3)
288
289
290
291
292
293 #define CCB_HASH_SHIFT 8
294 #define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT)
295 #define CCB_HASH_MASK (CCB_HASH_SIZE-1)
296 #if 1
297 #define CCB_HASH_CODE(dsa) \
298 (((dsa) >> (_LGRU16_(sizeof(struct sym_ccb)))) & CCB_HASH_MASK)
299 #else
300 #define CCB_HASH_CODE(dsa) (((dsa) >> 9) & CCB_HASH_MASK)
301 #endif
302
303 #if SYM_CONF_DMA_ADDRESSING_MODE == 2
304
305
306
307
308 #define SYM_DMAP_SHIFT (4)
309 #define SYM_DMAP_SIZE (1u<<SYM_DMAP_SHIFT)
310 #define SYM_DMAP_MASK (SYM_DMAP_SIZE-1)
311 #endif
312
313
314
315
316 #define SYM_DISC_ENABLED (1)
317 #define SYM_TAGS_ENABLED (1<<1)
318 #define SYM_SCAN_BOOT_DISABLED (1<<2)
319 #define SYM_SCAN_LUNS_DISABLED (1<<3)
320
321
322
323
324 #define SYM_AVOID_BUS_RESET (1)
325
326
327
328
329 #define SYM_SNOOP_TIMEOUT (10000000)
330 #define BUS_8_BIT 0
331 #define BUS_16_BIT 1
332
333
334
335
336 struct sym_trans {
337 u8 period;
338 u8 offset;
339 unsigned int width:1;
340 unsigned int iu:1;
341 unsigned int dt:1;
342 unsigned int qas:1;
343 unsigned int check_nego:1;
344 unsigned int renego:2;
345 };
346
347
348
349
350
351
352
353
354
355
356 struct sym_tcbh {
357
358
359
360
361
362
363 u32 luntbl_sa;
364 u32 lun0_sa;
365
366
367
368
369
370 u_char uval;
371 u_char sval;
372 u_char filler1;
373 u_char wval;
374 };
375
376
377
378
379 struct sym_tcb {
380
381
382
383
384 struct sym_tcbh head;
385
386
387
388
389
390 u32 *luntbl;
391 int nlcb;
392
393
394
395
396 struct sym_lcb *lun0p;
397 #if SYM_CONF_MAX_LUN > 1
398 struct sym_lcb **lunmp;
399 #endif
400
401 #ifdef SYM_HAVE_STCB
402
403
404
405 struct sym_stcb s;
406 #endif
407
408
409 struct sym_trans tgoal;
410
411
412 struct sym_trans tprint;
413
414
415
416
417
418 struct sym_ccb * nego_cp;
419
420
421
422
423 u_char to_reset;
424
425
426
427
428
429 unsigned char usrflags;
430 unsigned char usr_period;
431 unsigned char usr_width;
432 unsigned short usrtags;
433 struct scsi_target *starget;
434 };
435
436
437
438
439
440
441
442
443
444
445 struct sym_lcbh {
446
447
448
449
450
451
452 u32 resel_sa;
453
454
455
456
457
458 u32 itl_task_sa;
459
460
461
462
463 u32 itlq_tbl_sa;
464 };
465
466
467
468
469 struct sym_lcb {
470
471
472
473
474 struct sym_lcbh head;
475
476
477
478
479
480
481 u32 *itlq_tbl;
482
483
484
485
486 u_short busy_itlq;
487 u_short busy_itl;
488
489
490
491
492 u_short ia_tag;
493 u_short if_tag;
494 u_char *cb_tags;
495
496
497
498
499 #ifdef SYM_HAVE_SLCB
500 struct sym_slcb s;
501 #endif
502
503 #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
504
505
506
507
508 SYM_QUEHEAD waiting_ccbq;
509 SYM_QUEHEAD started_ccbq;
510 int num_sgood;
511 u_short started_tags;
512 u_short started_no_tag;
513 u_short started_max;
514 u_short started_limit;
515 #endif
516
517 #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
518
519
520
521
522 u_char tags_si;
523 u_short tags_sum[2];
524 u_short tags_since;
525 #endif
526
527
528
529
530 u_char to_clear;
531
532
533
534
535 u_char user_flags;
536 u_char curr_flags;
537 };
538
539
540
541
542
543
544 struct sym_actscr {
545 u32 start;
546 u32 restart;
547 };
548
549
550
551
552
553
554
555
556 struct sym_pmc {
557 struct sym_tblmove sg;
558 u32 ret;
559 };
560
561
562
563
564
565
566
/*
 * Return the LCB for (target tp, lun), or NULL if none is allocated.
 * Fix: arguments and the full expansions are now parenthesized so
 * the macro composes safely inside larger expressions (the previous
 * bare "!lun ? ... : ..." form could misparse at the call site).
 */
#if SYM_CONF_MAX_LUN <= 1
#define sym_lp(tp, lun) ((!(lun)) ? (tp)->lun0p : NULL)
#else
#define sym_lp(tp, lun) \
	((!(lun)) ? (tp)->lun0p : \
	 ((tp)->lunmp ? (tp)->lunmp[(u8)(lun)] : NULL))
#endif
573
574
575
576
577
578
579
580
581
582
583
584
585
586 #define HX_REG scr0
587 #define HX_PRT nc_scr0
588 #define HS_REG scr1
589 #define HS_PRT nc_scr1
590 #define SS_REG scr2
591 #define SS_PRT nc_scr2
592 #define HF_REG scr3
593 #define HF_PRT nc_scr3
594
595
596
597
598 #define host_xflags phys.head.status[0]
599 #define host_status phys.head.status[1]
600 #define ssss_status phys.head.status[2]
601 #define host_flags phys.head.status[3]
602
603
604
605
606 #define HF_IN_PM0 1u
607 #define HF_IN_PM1 (1u<<1)
608 #define HF_ACT_PM (1u<<2)
609 #define HF_DP_SAVED (1u<<3)
610 #define HF_SENSE (1u<<4)
611 #define HF_EXT_ERR (1u<<5)
612 #define HF_DATA_IN (1u<<6)
613 #ifdef SYM_CONF_IARB_SUPPORT
614 #define HF_HINT_IARB (1u<<7)
615 #endif
616
617
618
619
620 #if SYM_CONF_DMA_ADDRESSING_MODE == 2
621 #define HX_DMAP_DIRTY (1u<<7)
622 #endif
623
624
625
626
627
628
629
630
631
632
633
634
635 struct sym_ccbh {
636
637
638
639 struct sym_actscr go;
640
641
642
643
644
645
646
647 u32 savep;
648 u32 lastp;
649
650
651
652
653 u8 status[4];
654 };
655
656
657
658
659
660
661
662
663 #if SYM_CONF_GENERIC_SUPPORT
664 #define sym_set_script_dp(np, cp, dp) \
665 do { \
666 if (np->features & FE_LDSTR) \
667 cp->phys.head.lastp = cpu_to_scr(dp); \
668 else \
669 np->ccb_head.lastp = cpu_to_scr(dp); \
670 } while (0)
671 #define sym_get_script_dp(np, cp) \
672 scr_to_cpu((np->features & FE_LDSTR) ? \
673 cp->phys.head.lastp : np->ccb_head.lastp)
674 #else
675 #define sym_set_script_dp(np, cp, dp) \
676 do { \
677 cp->phys.head.lastp = cpu_to_scr(dp); \
678 } while (0)
679
680 #define sym_get_script_dp(np, cp) (cp->phys.head.lastp)
681 #endif
682
683
684
685
686
687
688
689
690 struct sym_dsb {
691
692
693
694
695 struct sym_ccbh head;
696
697
698
699
700
701
702
703 struct sym_pmc pm0;
704 struct sym_pmc pm1;
705
706
707
708
709 struct sym_tblsel select;
710 struct sym_tblmove smsg;
711 struct sym_tblmove smsg_ext;
712 struct sym_tblmove cmd;
713 struct sym_tblmove sense;
714 struct sym_tblmove wresid;
715 struct sym_tblmove data [SYM_CONF_MAX_SG];
716 };
717
718
719
720
721 struct sym_ccb {
722
723
724
725
726
727 struct sym_dsb phys;
728
729
730
731
732 struct scsi_cmnd *cmd;
733 u8 cdb_buf[16];
734 #define SYM_SNS_BBUF_LEN 32
735 u8 sns_bbuf[SYM_SNS_BBUF_LEN];
736 int data_len;
737 int segments;
738
739 u8 order;
740 unsigned char odd_byte_adjustment;
741
742 u_char nego_status;
743 u_char xerr_status;
744 u32 extra_bytes;
745
746
747
748
749
750
751
752
753
754
755 u_char scsi_smsg [12];
756 u_char scsi_smsg2[12];
757
758
759
760
761 u_char sensecmd[6];
762 u_char sv_scsi_status;
763 u_char sv_xerr_status;
764 int sv_resid;
765
766
767
768
769 u32 ccb_ba;
770 u_short tag;
771
772 u_char target;
773 u_char lun;
774 struct sym_ccb *link_ccbh;
775 SYM_QUEHEAD link_ccbq;
776 u32 startp;
777 u32 goalp;
778 int ext_sg;
779 int ext_ofs;
780 #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
781 SYM_QUEHEAD link2_ccbq;
782 u_char started;
783 #endif
784 u_char to_abort;
785 #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
786 u_char tags_si;
787 #endif
788 };
789
790 #define CCB_BA(cp,lbl) cpu_to_scr(cp->ccb_ba + offsetof(struct sym_ccb, lbl))
791
792 typedef struct device *m_pool_ident_t;
793
794
795
796
797 struct sym_hcb {
798
799
800
801
802
803
804 #if SYM_CONF_GENERIC_SUPPORT
805 struct sym_ccbh ccb_head;
806 struct sym_tcbh tcb_head;
807 struct sym_lcbh lcb_head;
808 #endif
809
810
811
812
813 struct sym_actscr idletask, notask, bad_itl, bad_itlq;
814 u32 idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;
815
816
817
818
819
820 u32 *badluntbl;
821 u32 badlun_sa;
822
823
824
825
826 u32 hcb_ba;
827
828
829
830
831
832
833 u32 scr_ram_seg;
834
835
836
837
838
839
840 u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
841 sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
842 sv_stest1;
843
844
845
846
847
848
849 u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
850 rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;
851
852
853
854
855 struct sym_tcb target[SYM_CONF_MAX_TARGET];
856
857
858
859
860
861 u32 *targtbl;
862 u32 targtbl_ba;
863
864
865
866
867 m_pool_ident_t bus_dmat;
868
869
870
871
872 struct sym_shcb s;
873
874
875
876
877 u32 mmio_ba;
878 u32 ram_ba;
879
880
881
882
883
884
885
886 u_char *scripta0;
887 u_char *scriptb0;
888 u_char *scriptz0;
889 u32 scripta_ba;
890 u32 scriptb_ba;
891 u32 scriptz_ba;
892 u_short scripta_sz;
893 u_short scriptb_sz;
894 u_short scriptz_sz;
895
896
897
898
899
900 struct sym_fwa_ba fwa_bas;
901 struct sym_fwb_ba fwb_bas;
902 struct sym_fwz_ba fwz_bas;
903 void (*fw_setup)(struct sym_hcb *np, struct sym_fw *fw);
904 void (*fw_patch)(struct Scsi_Host *);
905 char *fw_name;
906
907
908
909
910 u_int features;
911 u_char myaddr;
912 u_char maxburst;
913 u_char maxwide;
914 u_char minsync;
915 u_char maxsync;
916 u_char maxoffs;
917 u_char minsync_dt;
918 u_char maxsync_dt;
919 u_char maxoffs_dt;
920 u_char multiplier;
921 u_char clock_divn;
922 u32 clock_khz;
923 u32 pciclk_khz;
924
925
926
927
928
929 volatile
930 u32 *squeue;
931 u32 squeue_ba;
932 u_short squeueput;
933 u_short actccbs;
934
935
936
937
938
939 u_short dqueueget;
940 volatile
941 u32 *dqueue;
942 u32 dqueue_ba;
943
944
945
946
947
948
949 u_char msgout[8];
950 u_char msgin [8];
951 u32 lastmsg;
952 u32 scratch;
953
954
955
956
957 u_char usrflags;
958 u_char scsi_mode;
959 u_char verbose;
960
961
962
963
964 struct sym_ccb **ccbh;
965
966 SYM_QUEHEAD free_ccbq;
967 SYM_QUEHEAD busy_ccbq;
968
969
970
971
972
973
974
975 SYM_QUEHEAD comp_ccbq;
976
977 #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
978 SYM_QUEHEAD dummy_ccbq;
979 #endif
980
981
982
983
984
985
986
987
988
989
990
991
992
993 #ifdef SYM_CONF_IARB_SUPPORT
994 u_short iarb_max;
995 u_short iarb_count;
996 struct sym_ccb * last_cp;
997 #endif
998
999
1000
1001
1002
1003
1004 u_char abrt_msg[4];
1005 struct sym_tblmove abrt_tbl;
1006 struct sym_tblsel abrt_sel;
1007 u_char istat_sem;
1008
1009
1010
1011
1012 #if SYM_CONF_DMA_ADDRESSING_MODE != 0
1013 u_char use_dac;
1014 #if SYM_CONF_DMA_ADDRESSING_MODE == 2
1015 u_char dmap_dirty;
1016 u32 dmap_bah[SYM_DMAP_SIZE];
1017 #endif
1018 #endif
1019 };
1020
1021 #if SYM_CONF_DMA_ADDRESSING_MODE == 0
1022 #define use_dac(np) 0
1023 #define set_dac(np) do { } while (0)
1024 #else
1025 #define use_dac(np) (np)->use_dac
1026 #define set_dac(np) (np)->use_dac = 1
1027 #endif
1028
1029 #define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl))
1030
1031
1032
1033
1034
1035 struct sym_fw * sym_find_firmware(struct sym_chip *chip);
1036 void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len);
1037
1038
1039
1040
1041 char *sym_driver_name(void);
1042 void sym_print_xerr(struct scsi_cmnd *cmd, int x_status);
1043 int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int);
1044 struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision);
1045 #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
1046 void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn);
1047 #else
1048 void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp);
1049 #endif
1050 void sym_start_up(struct Scsi_Host *, int reason);
1051 irqreturn_t sym_interrupt(struct Scsi_Host *);
1052 int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task);
1053 struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
1054 void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
1055 struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1056 int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1057 int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
1058 int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
1059 int sym_reset_scsi_target(struct sym_hcb *np, int target);
1060 void sym_hcb_free(struct sym_hcb *np);
1061 int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram);
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071 #if SYM_CONF_DMA_ADDRESSING_MODE == 0
1072 #define DMA_DAC_MASK DMA_BIT_MASK(32)
1073 #define sym_build_sge(np, data, badd, len) \
1074 do { \
1075 (data)->addr = cpu_to_scr(badd); \
1076 (data)->size = cpu_to_scr(len); \
1077 } while (0)
1078 #elif SYM_CONF_DMA_ADDRESSING_MODE == 1
1079 #define DMA_DAC_MASK DMA_BIT_MASK(40)
1080 #define sym_build_sge(np, data, badd, len) \
1081 do { \
1082 (data)->addr = cpu_to_scr(badd); \
1083 (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len); \
1084 } while (0)
1085 #elif SYM_CONF_DMA_ADDRESSING_MODE == 2
1086 #define DMA_DAC_MASK DMA_BIT_MASK(64)
1087 int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s);
1088 static inline void
1089 sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)
1090 {
1091 u32 h = (badd>>32);
1092 int s = (h&SYM_DMAP_MASK);
1093
1094 if (h != np->dmap_bah[s])
1095 goto bad;
1096 good:
1097 (data)->addr = cpu_to_scr(badd);
1098 (data)->size = cpu_to_scr((s<<24) + len);
1099 return;
1100 bad:
1101 s = sym_lookup_dmap(np, h, s);
1102 goto good;
1103 }
1104 #else
1105 #error "Unsupported DMA addressing mode"
1106 #endif
1107
1108
1109
1110
1111
1112 #define sym_get_mem_cluster() \
1113 (void *) __get_free_pages(GFP_ATOMIC, SYM_MEM_PAGE_ORDER)
1114 #define sym_free_mem_cluster(p) \
1115 free_pages((unsigned long)p, SYM_MEM_PAGE_ORDER)
1116
1117
1118
1119
1120 typedef struct sym_m_link {
1121 struct sym_m_link *next;
1122 } *m_link_p;
1123
1124
1125
1126
1127
1128 typedef struct sym_m_vtob {
1129 struct sym_m_vtob *next;
1130 void *vaddr;
1131 dma_addr_t baddr;
1132 } *m_vtob_p;
1133
1134
1135 #define VTOB_HASH_SHIFT 5
1136 #define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
1137 #define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
1138 #define VTOB_HASH_CODE(m) \
1139 ((((unsigned long)(m)) >> SYM_MEM_CLUSTER_SHIFT) & VTOB_HASH_MASK)
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150 typedef struct sym_m_pool {
1151 m_pool_ident_t dev_dmat;
1152 void * (*get_mem_cluster)(struct sym_m_pool *);
1153 #ifdef SYM_MEM_FREE_UNUSED
1154 void (*free_mem_cluster)(struct sym_m_pool *, void *);
1155 #endif
1156 #define M_GET_MEM_CLUSTER() mp->get_mem_cluster(mp)
1157 #define M_FREE_MEM_CLUSTER(p) mp->free_mem_cluster(mp, p)
1158 int nump;
1159 m_vtob_p vtob[VTOB_HASH_SIZE];
1160 struct sym_m_pool *next;
1161 struct sym_m_link h[SYM_MEM_CLUSTER_SHIFT - SYM_MEM_SHIFT + 1];
1162 } *m_pool_p;
1163
1164
1165
1166
1167
1168 void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name);
1169 void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name);
1170 dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m);
1171
1172
1173
1174
1175
1176
1177 #define _uvptv_(p) ((void *)((u_long)(p)))
1178
1179 #define _sym_calloc_dma(np, l, n) __sym_calloc_dma(np->bus_dmat, l, n)
1180 #define _sym_mfree_dma(np, p, l, n) \
1181 __sym_mfree_dma(np->bus_dmat, _uvptv_(p), l, n)
1182 #define sym_calloc_dma(l, n) _sym_calloc_dma(np, l, n)
1183 #define sym_mfree_dma(p, l, n) _sym_mfree_dma(np, p, l, n)
1184 #define vtobus(p) __vtobus(np->bus_dmat, _uvptv_(p))
1185
1186
1187
1188
1189
1190
1191 #define sym_m_pool_match(mp_id1, mp_id2) (mp_id1 == mp_id2)
1192
1193 static inline void *sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
1194 {
1195 void *vaddr = NULL;
1196 dma_addr_t baddr = 0;
1197
1198 vaddr = dma_alloc_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, &baddr,
1199 GFP_ATOMIC);
1200 if (vaddr) {
1201 vbp->vaddr = vaddr;
1202 vbp->baddr = baddr;
1203 }
1204 return vaddr;
1205 }
1206
1207 static inline void sym_m_free_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
1208 {
1209 dma_free_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, vbp->vaddr,
1210 vbp->baddr);
1211 }
1212
1213 #endif