This source file includes the following definitions:
- set_io_port_base
- virt_to_phys
- phys_to_virt
- isa_virt_to_bus
- isa_bus_to_virt
- __ioremap_mode
- ioremap_prot
- iounmap
- BUILDSTRING
- memcpy_fromio
- memcpy_toio
#ifndef _ASM_IO_H
#define _ASM_IO_H

#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>

/*
 * Raw accessors are never byte-swapped in software: whatever swapping
 * a raw operation observes has been done by the bus hardware, so any
 * CPU-mode-specific swapping is left to the driver.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

# define __relaxed_ioswabb ioswabb
# define __relaxed_ioswabw ioswabw
# define __relaxed_ioswabl ioswabl
# define __relaxed_ioswabq ioswabq

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address
 * to which all ports are being mapped.
 */
extern unsigned long mips_io_port_base;

static inline void set_io_port_base(unsigned long base)
{
	mips_io_port_base = base;
}
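
/*
 * Example (hypothetical platform code, not part of this header): a
 * board's early setup maps its I/O window and points port I/O at it.
 * MYBOARD_IO_PHYS and MYBOARD_IO_SIZE are made-up constants.
 *
 *	void __init myboard_setup_io(void)
 *	{
 *		void __iomem *io = ioremap(MYBOARD_IO_PHYS, MYBOARD_IO_SIZE);
 *
 *		set_io_port_base((unsigned long)io);
 *	}
 *
 * After this, inb()/outb() on port 0x3f8, say, dereference
 * mips_io_port_base + 0x3f8.
 */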

/*
 * Provide the necessary definitions for generic iomap.  Port I/O is
 * memory mapped at mips_io_port_base, so no separate address range
 * needs to be reserved for port cookies.
 */
#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	mips_io_port_base
#define PIO_MASK	IO_SPACE_LIMIT
#define PIO_RESERVED	0x0UL

/*
 * Enforce in-order execution of data I/O.  These are equivalent to the
 * corresponding memory barriers defined in <asm/barrier.h>.
 */
#define iobarrier_rw() mb()
#define iobarrier_r() rmb()
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()

/*
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for the
 * memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc().
 *
 * This function does not give bus mappings for DMA transfers; in
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return __pa(address);
}
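
/*
 * Illustrative sketch (not part of this header): obtaining the
 * physical address of a kmalloc()ed buffer, e.g. to hand to firmware
 * that expects a CPU physical address:
 *
 *	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *
 * For device DMA, use the DMA API (dma_map_single() etc.) instead.
 */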

/*
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for the memory
 * address given.  It is only valid to use this function on addresses
 * that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}

/*
 * Deprecated: PCI bus addresses are not necessarily 1:1 with physical
 * addresses, so portable drivers should use the DMA API rather than
 * these interfaces.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem *__ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);

static inline void __iomem *__ioremap_mode(phys_addr_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_addr_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1, otherwise map using page tables.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

/*
 * ioremap_prot - map bus memory into CPU space with given protection
 * @offset: bus address of the memory
 * @size: size of the resource to map
 * @prot_val: page protection flags; only the cache attribute bits
 *            (_CACHE_MASK) are honoured
 */
static inline void __iomem *ioremap_prot(phys_addr_t offset,
		unsigned long size, unsigned long prot_val)
{
	return __ioremap_mode(offset, size, prot_val & _CACHE_MASK);
}

/*
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to make
 * bus memory CPU accessible via the readb/readw/readl/writeb/writew/
 * writel functions and the other mmio helpers.  The returned address
 * is not guaranteed to be usable directly as a virtual address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
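
/*
 * Typical use (hypothetical driver sketch; MY_REG_BASE and MY_STATUS
 * are made-up):
 *
 *	void __iomem *regs = ioremap(MY_REG_BASE, 0x1000);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + MY_STATUS);
 *	...
 *	iounmap(regs);
 */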

/*
 * ioremap_nocache - map bus memory into CPU space, uncached
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * Same as ioremap() on MIPS: the mapping is guaranteed uncached, so
 * accesses reach the device without cache interference.  ioremap_uc()
 * is an alias.
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache

/*
 * ioremap_cache - map bus memory into CPU space, cacheable
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * Uses the default cache attribute.  Useful for memory-like regions
 * (e.g. flash) where caching is safe; not suitable for registers with
 * read/write side effects.
 */
#define ioremap_cache(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)

/*
 * ioremap_wc - map bus memory into CPU space, write-combined
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * Uses the CPU's uncached-accelerated attribute
 * (boot_cpu_data.writecombine) where available; on CPUs without it
 * this degrades to a plain uncached mapping.  Writes may be collapsed
 * and reordered, which is acceptable for e.g. frame buffers but not
 * for registers with side effects.
 */
#define ioremap_wc(offset, size)					\
	__ioremap_mode((offset), (size), boot_cpu_data.writecombine)
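
/*
 * Sketch (hypothetical): mapping a frame buffer write-combined so
 * pixel writes may be merged into larger bus bursts:
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 *	if (fb)
 *		memset_io(fb, 0, fb_len);	(clear the screen)
 *
 * fb_phys and fb_len would come from the device's resources.
 */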

static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}

#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON3)
#define war_io_reorder_wmb()		wmb()
#else
#define war_io_reorder_wmb()		barrier()
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq)	\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set push"		"\t\t# __writeq""\n\t"	\
			".set arch=r4000"	"\n\t"			\
			"dsll32 %L0, %L0, 0"	"\n\t"			\
			"dsrl32 %L0, %L0, 0"	"\n\t"			\
			"dsll32 %M0, %M0, 0"	"\n\t"			\
			"or	%L0, %L0, %M0"	"\n\t"			\
			"sd	%L0, %2"	"\n\t"			\
			".set pop"		"\n"			\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set push"		"\t\t# __readq" "\n\t"	\
			".set arch=r4000"	"\n\t"			\
			"ld	%L0, %1"	"\n\t"			\
			"dsra32 %M0, %L0, 0"	"\n\t"			\
			"sll	%L0, %L0, 0"	"\n\t"			\
			".set pop"		"\n"			\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p)	\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__addr, __val);			\
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax)			\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1)				\
__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(, bwlq, type, 0)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_MEM(q, u64)
#else
__BUILD_MEMORY_PFX(__raw_, q, u64, 0)
__BUILD_MEMORY_PFX(__mem_, q, u64, 0)
#endif

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
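
/*
 * The expansions above provide inb()/inw()/inl(), outb()/outw()/outl()
 * and their _p ("pausing") variants.  Hedged sketch of legacy port I/O
 * (a hypothetical ISA-style UART at the traditional 0x3f8 base):
 *
 *	outb(0x80, 0x3f8 + 3);		(set DLAB)
 *	baud_lo = inb(0x3f8);
 *
 * The port number is simply an offset into the window at
 * mips_io_port_base.
 */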

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)

__BUILDIO(q, u64)
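
/*
 * A summary of the accessor families generated above (illustrative,
 * not new API; regs and REG are hypothetical):
 *
 *	u32 a = readl(regs + REG);		(ordered, ioswab-adjusted)
 *	u32 b = readl_relaxed(regs + REG);	(no trailing rmb())
 *	u32 c = __raw_readl(regs + REG);	(no software byte swap)
 *
 * ____raw_readq()/____raw_writeq() exist even on 32-bit kernels, using
 * the 64-bit asm path when cpu_has_64bits.
 */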

#define readb_relaxed			__relaxed_readb
#define readw_relaxed			__relaxed_readw
#define readl_relaxed			__relaxed_readl
#ifdef CONFIG_64BIT
#define readq_relaxed			__relaxed_readq
#endif

#define writeb_relaxed			__relaxed_writeb
#define writew_relaxed			__relaxed_writew
#define writel_relaxed			__relaxed_writel
#ifdef CONFIG_64BIT
#define writeq_relaxed			__relaxed_writeq
#endif

#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))

/*
 * Some code tests for these symbols.
 */
#ifdef CONFIG_64BIT
#define readq				readq
#define writeq				writeq
#endif

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
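
/*
 * Sketch (hypothetical): draining a 16-bit data FIFO with the string
 * accessors generated above.  MY_FIFO and MY_FIFO_PORT are made-up:
 *
 *	u16 buf[64];
 *
 *	readsw(regs + MY_FIFO, buf, 64);	(MMIO variant)
 *	insw(MY_FIFO_PORT, buf, 64);		(port I/O variant)
 *
 * The device register is read repeatedly while the memory pointer
 * advances.
 */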

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
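
/*
 * Sketch (hypothetical): copying a firmware image into device-local
 * memory through an ioremap()ed window.  MY_SRAM_PHYS is made-up:
 *
 *	void __iomem *win = ioremap(MY_SRAM_PHYS, fw->size);
 *
 *	if (win) {
 *		memcpy_toio(win, fw->data, fw->size);
 *		iounmap(win);
 *	}
 *
 * These helpers forward to plain memset()/memcpy(), which is valid on
 * MIPS where __iomem mappings are ordinary (uncached) kernel pointers.
 */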

/*
 * The caches on some architectures aren't dma-coherent and coherency
 * must be handled in software.  Three operations can be applied to DMA
 * buffers:
 *
 *  - dma_cache_wback_inv(start, size) writes back and invalidates the
 *    affected cache lines; use when a device both reads and writes the
 *    buffer.
 *  - dma_cache_wback(start, size) writes dirty lines back to memory;
 *    use before a device reads the buffer (DMA to the device).
 *  - dma_cache_inv(start, size) invalidates the affected lines; use
 *    before the CPU reads data a device has written (DMA from the
 *    device).  Dirty lines may be written back or simply discarded.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else

#define dma_cache_wback_inv(start, size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start, size)		\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start, size)		\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
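
/*
 * Sketch (hypothetical, platform-internal use): making a buffer
 * visible to a device before it DMAs from memory, then discarding
 * stale cache lines after the device DMAs to memory:
 *
 *	dma_cache_wback((unsigned long)buf, len);	(CPU -> device)
 *	... start the transfer, wait for completion ...
 *	dma_cache_inv((unsigned long)buf, len);		(device -> CPU)
 *
 * Drivers should normally rely on the DMA API, which invokes these as
 * required on non-coherent platforms.
 */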

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
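
/*
 * Sketch (hypothetical): a device with 32-bit CSRs on a 64-bit bus;
 * the adjustment above selects the correct word on big-endian.
 * MY_STATUS and MY_ENABLE are made-up:
 *
 *	u32 stat = csr_in32(dev_base + MY_STATUS);
 *
 *	csr_out32(stat | MY_ENABLE, dev_base + MY_STATUS);
 */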

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p

void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */