This source file includes the following definitions (a brief usage sketch follows the list):
- claim_dma_lock
- release_dma_lock
- enable_dma
- disable_dma
- clear_dma_ff
- set_dma_mode
- set_dma_ext_mode
- set_dma_page
- set_dma_addr
- set_dma_count
- get_dma_residue
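
For orientation, here is a minimal usage sketch (not part of this file) of how a driver typically programs an ISA DMA channel with these helpers, assuming the channel was already reserved with request_dma(). The function name, channel, bus address and length are made-up examples:

	/*
	 * Hedged example: program "channel" for a device-to-memory
	 * (DMA_MODE_READ) transfer of "len" bytes starting at "bus_addr".
	 */
	static void example_start_dma_read(unsigned int channel,
					   unsigned int bus_addr, unsigned int len)
	{
		unsigned long flags = claim_dma_lock();	/* serialize access to the DMAC */

		disable_dma(channel);			/* mask the channel while reprogramming */
		clear_dma_ff(channel);			/* put the byte-pointer flip-flop in a known state */
		set_dma_mode(channel, DMA_MODE_READ);	/* device -> memory, single mode */
		set_dma_addr(channel, bus_addr);	/* low 16 address bits plus page/high-page bits */
		set_dma_count(channel, len);		/* length in bytes (must be even for channels 5..7) */
		enable_dma(channel);			/* unmask: the device may now request transfers */

		release_dma_lock(flags);
	}
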
#ifndef _ASM_DMA_H
#define _ASM_DMA_H

#include <linux/spinlock.h>
#include <asm/io.h>

#define dma_outb	outb
#define dma_inb		inb

#define MAX_DMA_CHANNELS	8	/* two cascaded 8237-style controllers */

/* Platform-specific limits on the highest address reachable by ISA DMA,
   due to PCI<->ISA bridge or wiring restrictions. */
#define ALPHA_XL_MAX_ISA_DMA_ADDRESS		0x04000000UL	/* 64 MB */
#define ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS	0x01000000UL	/* 16 MB */
#define ALPHA_SABLE_MAX_ISA_DMA_ADDRESS		0x80000000UL	/*  2 GB */
#define ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS		0x80000000UL	/*  2 GB */
/* All other platforms get the full 32-bit ISA bus address space (4 GB). */
#define ALPHA_MAX_ISA_DMA_ADDRESS		0x100000000UL

#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ISA_DMA_ADDRESS		(alpha_mv.max_isa_dma_address)
#else
# if defined(CONFIG_ALPHA_XL)
#  define MAX_ISA_DMA_ADDRESS		ALPHA_XL_MAX_ISA_DMA_ADDRESS
# elif defined(CONFIG_ALPHA_RUFFIAN)
#  define MAX_ISA_DMA_ADDRESS		ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS
# elif defined(CONFIG_ALPHA_SABLE)
#  define MAX_ISA_DMA_ADDRESS		ALPHA_SABLE_MAX_ISA_DMA_ADDRESS
# elif defined(CONFIG_ALPHA_ALCOR)
#  define MAX_ISA_DMA_ADDRESS		ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS
# else
#  define MAX_ISA_DMA_ADDRESS		ALPHA_MAX_ISA_DMA_ADDRESS
# endif
#endif

/* If the machine vector provides an IOMMU hook (mv_pci_tbi), DMA is not
   address-limited; otherwise restrict it to the first 16 MB of the
   identity-mapped region, i386-style. */
#define MAX_DMA_ADDRESS		(alpha_mv.mv_pci_tbi ?	\
				 ~0UL : IDENT_ADDR + 0x01000000)

#define IO_DMA1_BASE	0x00	/* 8-bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE	0xC0	/* 16-bit master DMA, channels 4..7 */

/* 8237 DMA controller registers */
#define DMA1_CMD_REG		0x08	/* command register (w) */
#define DMA1_STAT_REG		0x08	/* status register (r) */
#define DMA1_REQ_REG		0x09	/* request register (w) */
#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
#define DMA1_MODE_REG		0x0B	/* mode register (w) */
#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG		0x0D	/* Temporary Register (r) */
#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
#define DMA1_CLR_MASK_REG	0x0E	/* Clear Mask */
#define DMA1_MASK_ALL_REG	0x0F	/* all-channels mask (w) */
#define DMA1_EXT_MODE_REG	(0x400 | DMA1_MODE_REG)	/* extended mode (EISA) */

#define DMA2_CMD_REG		0xD0	/* command register (w) */
#define DMA2_STAT_REG		0xD0	/* status register (r) */
#define DMA2_REQ_REG		0xD2	/* request register (w) */
#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
#define DMA2_MODE_REG		0xD6	/* mode register (w) */
#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG		0xDA	/* Temporary Register (r) */
#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
#define DMA2_CLR_MASK_REG	0xDC	/* Clear Mask */
#define DMA2_MASK_ALL_REG	0xDE	/* all-channels mask (w) */
#define DMA2_EXT_MODE_REG	(0x400 | DMA2_MODE_REG)	/* extended mode (EISA) */

/* Channel base-address registers */
#define DMA_ADDR_0	0x00
#define DMA_ADDR_1	0x02
#define DMA_ADDR_2	0x04
#define DMA_ADDR_3	0x06
#define DMA_ADDR_4	0xC0
#define DMA_ADDR_5	0xC4
#define DMA_ADDR_6	0xC8
#define DMA_ADDR_7	0xCC

/* Channel count registers */
#define DMA_CNT_0	0x01
#define DMA_CNT_1	0x03
#define DMA_CNT_2	0x05
#define DMA_CNT_3	0x07
#define DMA_CNT_4	0xC2
#define DMA_CNT_5	0xC6
#define DMA_CNT_6	0xCA
#define DMA_CNT_7	0xCE

/* DMA page registers (address bits 16..23).  There is no DMA_PAGE_4:
   channel 4 is the cascade channel and does no transfers of its own. */
#define DMA_PAGE_0	0x87
#define DMA_PAGE_1	0x83
#define DMA_PAGE_2	0x81
#define DMA_PAGE_3	0x82
#define DMA_PAGE_5	0x8B
#define DMA_PAGE_6	0x89
#define DMA_PAGE_7	0x8A

/* High page registers (address bits 24..31) */
#define DMA_HIPAGE_0	(0x400 | DMA_PAGE_0)
#define DMA_HIPAGE_1	(0x400 | DMA_PAGE_1)
#define DMA_HIPAGE_2	(0x400 | DMA_PAGE_2)
#define DMA_HIPAGE_3	(0x400 | DMA_PAGE_3)
#define DMA_HIPAGE_4	(0x400 | DMA_PAGE_4)
#define DMA_HIPAGE_5	(0x400 | DMA_PAGE_5)
#define DMA_HIPAGE_6	(0x400 | DMA_PAGE_6)
#define DMA_HIPAGE_7	(0x400 | DMA_PAGE_7)

#define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0	/* cascade mode, used for the slave controller on channel 4 */

#define DMA_AUTOINIT	0x10

extern spinlock_t dma_spin_lock;

static __inline__ unsigned long claim_dma_lock(void)
{
	unsigned long flags;
	spin_lock_irqsave(&dma_spin_lock, flags);
	return flags;
}

static __inline__ void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}

/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(dmanr, DMA1_MASK_REG);
	else
		dma_outb(dmanr & 3, DMA2_MASK_REG);
}

static __inline__ void disable_dma(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(dmanr | 4, DMA1_MASK_REG);
	else
		dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
}

/* Clear the DMA byte-pointer flip-flop, so that the next write to an
 * address or count register is taken as the low byte.  The helpers below
 * assume the flip-flop state is known, which is why callers hold the DMA
 * spinlock (interrupts disabled) around the whole programming sequence.
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(0, DMA1_CLEAR_FF_REG);
	else
		dma_outb(0, DMA2_CLEAR_FF_REG);
}

/* Set transfer mode (DMA_MODE_*) for a specific channel; the channel
   should be masked (disabled) while it is reprogrammed. */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
	if (dmanr <= 3)
		dma_outb(mode | dmanr, DMA1_MODE_REG);
	else
		dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
}

/* Set extended mode (EISA) for a specific channel. */
static __inline__ void set_dma_ext_mode(unsigned int dmanr, char ext_mode)
{
	if (dmanr <= 3)
		dma_outb(ext_mode | dmanr, DMA1_EXT_MODE_REG);
	else
		dma_outb(ext_mode | (dmanr & 3), DMA2_EXT_MODE_REG);
}

/* Load the page register (address bits 16..23) and high page register
 * (bits 24..31) for a specific channel.  Channels 5..7 transfer 16-bit
 * words, so address bit 16 comes from the controller's word address and
 * is masked out of the page value.
 */
static __inline__ void set_dma_page(unsigned int dmanr, unsigned int pagenr)
{
	switch (dmanr) {
	case 0:
		dma_outb(pagenr, DMA_PAGE_0);
		dma_outb(pagenr >> 8, DMA_HIPAGE_0);
		break;
	case 1:
		dma_outb(pagenr, DMA_PAGE_1);
		dma_outb(pagenr >> 8, DMA_HIPAGE_1);
		break;
	case 2:
		dma_outb(pagenr, DMA_PAGE_2);
		dma_outb(pagenr >> 8, DMA_HIPAGE_2);
		break;
	case 3:
		dma_outb(pagenr, DMA_PAGE_3);
		dma_outb(pagenr >> 8, DMA_HIPAGE_3);
		break;
	case 5:
		dma_outb(pagenr & 0xfe, DMA_PAGE_5);
		dma_outb(pagenr >> 8, DMA_HIPAGE_5);
		break;
	case 6:
		dma_outb(pagenr & 0xfe, DMA_PAGE_6);
		dma_outb(pagenr >> 8, DMA_HIPAGE_6);
		break;
	case 7:
		dma_outb(pagenr & 0xfe, DMA_PAGE_7);
		dma_outb(pagenr >> 8, DMA_HIPAGE_7);
		break;
	}
}

/* Set the transfer address (including page bits) for a specific channel.
 * Assumes the flip-flop is clear.  Channels 5..7 are programmed with a
 * word address, i.e. the byte address shifted right by one.
 */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	if (dmanr <= 3) {
		dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
		dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
	} else {
		dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
		dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
	}
	set_dma_page(dmanr, a >> 16);
}

/* Set the transfer size for a specific channel.  The controller performs
 * "initial count + 1" transfers, hence the decrement below.  "count" is
 * in bytes and must be even for channels 5..7, which count in 16-bit
 * words.  Assumes the flip-flop is clear.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
	count--;
	if (dmanr <= 3) {
		dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
		dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
	} else {
		dma_outb((count >> 1) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
		dma_outb((count >> 9) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
	}
}

/* Get the DMA residue count.  After a completed transfer this returns
 * zero; while a transfer is in progress it returns a snapshot of the
 * number of bytes still to go.  Assumes the flip-flop is clear.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	unsigned int io_port = (dmanr <= 3) ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
					    : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;

	/* use a short so that the terminal count wraps to zero */
	unsigned short count;

	count = 1 + dma_inb(io_port);
	count += dma_inb(io_port) << 8;

	/* channels 5..7 count in words, so double the result for bytes */
	return (dmanr <= 3) ? count : (count << 1);
}

/* Channel allocation; implemented outside this header. */
extern int request_dma(unsigned int dmanr, const char *device_id);	/* reserve a channel */
extern void free_dma(unsigned int dmanr);				/* release it again */
#define KERNEL_HAVE_CHECK_DMA
extern int check_dma(unsigned int dmanr);

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif

#endif /* _ASM_DMA_H */
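
Follow-up to the usage sketch at the top of this page: a driver claims a channel once with request_dma() and returns it with free_dma(); the programming sequence can then be repeated for each transfer. Another hedged sketch, with the channel number and owner string made up:

	#include <asm/dma.h>

	static int example_claim_channel(void)
	{
		/* "example-isa" is a placeholder owner name (it shows up in /proc/dma) */
		if (request_dma(3, "example-isa") != 0)
			return -1;	/* channel 3 is already owned by another driver */

		/* ... program and run transfers on channel 3 as sketched above ... */

		free_dma(3);		/* hand the channel back */
		return 0;
	}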