This source file includes the following definitions:
- claim_dma_lock
- release_dma_lock
- enable_dma
- disable_dma
- clear_dma_ff
- set_dma_mode
- set_dma_page
- set_dma_addr
- set_dma_count
- get_dma_residue
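Taken together, these helpers implement the classic ISA/8237-style DMA programming sequence. The sketch below shows how a driver would typically program a channel after reserving it with request_dma(), assuming this header has been included; the wrapper example_setup_dma() and its parameters (channel, phys_addr, len) are illustrative only and are not part of this header.

static void example_setup_dma(unsigned int channel, unsigned int phys_addr,
			      unsigned int len)
{
	unsigned long flags;

	flags = claim_dma_lock();             /* serialize access to the controllers */
	disable_dma(channel);                 /* mask the channel while reprogramming */
	clear_dma_ff(channel);                /* next register writes start with the low byte */
	set_dma_mode(channel, DMA_MODE_READ); /* device -> memory transfer */
	set_dma_addr(channel, phys_addr);     /* bus/physical address of the buffer */
	set_dma_count(channel, len);          /* transfer length in bytes */
	enable_dma(channel);                  /* unmask: transfer starts on device request */
	release_dma_lock(flags);
}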
#ifndef _ASM_DMA_H
#define _ASM_DMA_H

#include <asm/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb outb_p
#else
#define dma_outb outb
#endif

#define dma_inb inb
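
/*
 * The definitions below drive the two cascaded 8237-style ISA DMA
 * controllers: DMA1 (I/O base 0x00) provides the 8-bit channels 0-3 and
 * DMA2 (I/O base 0xC0) provides the 16-bit channels 4-7, with channel 4
 * conventionally used to cascade DMA1 into DMA2.  Accesses to the
 * controllers are serialized with claim_dma_lock()/release_dma_lock().
 */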
#ifndef CONFIG_GENERIC_ISA_DMA_SUPPORT_BROKEN
#define MAX_DMA_CHANNELS 8
#endif
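
/*
 * MAX_DMA_ADDRESS is the highest virtual address to which legacy
 * ISA-style DMA can reach on this platform; MAX_DMA_PFN is the same
 * limit expressed as a page frame number.
 */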
#if defined(CONFIG_SGI_IP22) || defined(CONFIG_SGI_IP28)
#define MAX_DMA_ADDRESS PAGE_OFFSET
#else
#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000)
#endif
#define MAX_DMA_PFN PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))

#ifndef MAX_DMA32_PFN
#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
#endif
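
/* 8237 DMA controller I/O port bases and control registers. */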
#define IO_DMA1_BASE 0x00
#define IO_DMA2_BASE 0xC0

#define DMA1_CMD_REG 0x08
#define DMA1_STAT_REG 0x08
#define DMA1_REQ_REG 0x09
#define DMA1_MASK_REG 0x0A
#define DMA1_MODE_REG 0x0B
#define DMA1_CLEAR_FF_REG 0x0C
#define DMA1_TEMP_REG 0x0D
#define DMA1_RESET_REG 0x0D
#define DMA1_CLR_MASK_REG 0x0E
#define DMA1_MASK_ALL_REG 0x0F

#define DMA2_CMD_REG 0xD0
#define DMA2_STAT_REG 0xD0
#define DMA2_REQ_REG 0xD2
#define DMA2_MASK_REG 0xD4
#define DMA2_MODE_REG 0xD6
#define DMA2_CLEAR_FF_REG 0xD8
#define DMA2_TEMP_REG 0xDA
#define DMA2_RESET_REG 0xDA
#define DMA2_CLR_MASK_REG 0xDC
#define DMA2_MASK_ALL_REG 0xDE
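
/*
 * Per-channel address and count registers.  The 16-bit channels on DMA2
 * transfer words rather than bytes, so set_dma_addr()/set_dma_count()
 * below shift the byte address and byte count right by one before
 * programming them.
 */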
#define DMA_ADDR_0 0x00
#define DMA_ADDR_1 0x02
#define DMA_ADDR_2 0x04
#define DMA_ADDR_3 0x06
#define DMA_ADDR_4 0xC0
#define DMA_ADDR_5 0xC4
#define DMA_ADDR_6 0xC8
#define DMA_ADDR_7 0xCC

#define DMA_CNT_0 0x01
#define DMA_CNT_1 0x03
#define DMA_CNT_2 0x05
#define DMA_CNT_3 0x07
#define DMA_CNT_4 0xC2
#define DMA_CNT_5 0xC6
#define DMA_CNT_6 0xCA
#define DMA_CNT_7 0xCE
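
/*
 * DMA page registers supply address bits 16-23 for each channel.  There
 * is no page register for channel 4, which is reserved for cascading.
 */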
#define DMA_PAGE_0 0x87
#define DMA_PAGE_1 0x83
#define DMA_PAGE_2 0x81
#define DMA_PAGE_3 0x82
#define DMA_PAGE_5 0x8B
#define DMA_PAGE_6 0x89
#define DMA_PAGE_7 0x8A
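
/*
 * Transfer modes: I/O-to-memory (read), memory-to-I/O (write), and
 * cascade.  DMA_AUTOINIT can be OR'd into a mode to enable
 * auto-initialize operation.
 */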
#define DMA_MODE_READ 0x44
#define DMA_MODE_WRITE 0x48
#define DMA_MODE_CASCADE 0xC0

#define DMA_AUTOINIT 0x10
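
/*
 * Controller accesses must be made while holding dma_spin_lock:
 * claim_dma_lock() disables local interrupts and takes the lock,
 * release_dma_lock() restores the saved interrupt state.
 */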
extern spinlock_t dma_spin_lock;

static __inline__ unsigned long claim_dma_lock(void)
{
	unsigned long flags;
	spin_lock_irqsave(&dma_spin_lock, flags);
	return flags;
}

static __inline__ void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}
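
/*
 * enable_dma()/disable_dma() clear or set the mask bit for a channel in
 * the owning controller's single-channel mask register.
 */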
static __inline__ void enable_dma(unsigned int dmanr)
{
	if (dmanr<=3)
		dma_outb(dmanr, DMA1_MASK_REG);
	else
		dma_outb(dmanr & 3, DMA2_MASK_REG);
}

static __inline__ void disable_dma(unsigned int dmanr)
{
	if (dmanr<=3)
		dma_outb(dmanr | 4, DMA1_MASK_REG);
	else
		dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
}
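
/*
 * Clear the 'DMA Pointer Flip Flop' before accessing the 16-bit address
 * or count registers, so that the next dma_outb()/dma_inb() pair starts
 * with the low byte.
 */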
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
	if (dmanr<=3)
		dma_outb(0, DMA1_CLEAR_FF_REG);
	else
		dma_outb(0, DMA2_CLEAR_FF_REG);
}
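
/* Set the transfer mode (DMA_MODE_*) for a specific channel. */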
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
	if (dmanr<=3)
		dma_outb(mode | dmanr, DMA1_MODE_REG);
	else
		dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
}
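
/*
 * Load the page register for a channel, i.e. address bits 16-23.  For
 * the 16-bit channels the lowest page-register bit is unused, hence the
 * '& 0xfe'.
 */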
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
{
	switch(dmanr) {
	case 0:
		dma_outb(pagenr, DMA_PAGE_0);
		break;
	case 1:
		dma_outb(pagenr, DMA_PAGE_1);
		break;
	case 2:
		dma_outb(pagenr, DMA_PAGE_2);
		break;
	case 3:
		dma_outb(pagenr, DMA_PAGE_3);
		break;
	case 5:
		dma_outb(pagenr & 0xfe, DMA_PAGE_5);
		break;
	case 6:
		dma_outb(pagenr & 0xfe, DMA_PAGE_6);
		break;
	case 7:
		dma_outb(pagenr & 0xfe, DMA_PAGE_7);
		break;
	}
}
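
/*
 * Set transfer address and page bits for a specific channel.  Assumes
 * the DMA flip-flop has already been cleared; the 16-bit channels are
 * programmed with a word address (byte address >> 1).
 */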
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	set_dma_page(dmanr, a>>16);
	if (dmanr <= 3) {
		dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
		dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
	} else {
		dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
		dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
	}
}
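
/*
 * Set the transfer size for a specific channel.  'count' is in bytes
 * and must be even for the 16-bit channels; the controller performs one
 * more transfer than the value programmed, hence the 'count--'.
 * Assumes the DMA flip-flop has already been cleared.
 */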
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
	count--;
	if (dmanr <= 3) {
		dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
		dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
	} else {
		dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
		dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
	}
}
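
/*
 * Get the remaining byte count for a channel.  'count' is held in an
 * unsigned short so that the 1 + 0xffff read back after a completed
 * transfer wraps to zero.  The two count bytes cannot be read
 * atomically, so the result is only reliable while the channel is not
 * actively transferring.
 */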
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
					 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
	unsigned short count;

	count = 1 + dma_inb(io_port);
	count += dma_inb(io_port) << 8;

	return (dmanr<=3)? count : (count<<1);
}
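
/* Channel reservation helpers, implemented in kernel/dma.c. */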
extern int request_dma(unsigned int dmanr, const char * device_id);
extern void free_dma(unsigned int dmanr);
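
/*
 * Set by PCI quirk code for ISA bridges with broken DMA; defined to 0
 * when PCI support is not configured.
 */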
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif

#endif