/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_IO_H
#define _ASM_TILE_IO_H

#include <linux/kernel.h>
#include <linux/bug.h>
#include <asm/page.h>

/* Maximum PCI I/O space address supported. */
#define IO_SPACE_LIMIT 0xffffffff

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * Some places try to pass in an loff_t for PHYSADDR (?!), so we cast it to
 * long before casting it to a pointer to avoid compiler warnings.
 */
#if CHIP_HAS_MMIO()
extern void __iomem *ioremap(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
	pgprot_t pgprot);
extern void iounmap(volatile void __iomem *addr);
#else
#define ioremap(physaddr, size)	((void __iomem *)(unsigned long)(physaddr))
#define iounmap(addr)		((void)0)
#endif

#define ioremap_nocache(physaddr, size)		ioremap(physaddr, size)
#define ioremap_wc(physaddr, size)		ioremap(physaddr, size)
#define ioremap_writethrough(physaddr, size)	ioremap(physaddr, size)
#define ioremap_fullcache(physaddr, size)	ioremap(physaddr, size)
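
/*
 * Illustrative sketch only, not part of this header: a driver would
 * typically pair ioremap() with readl()/writel() and iounmap().  The
 * BAR address, mapping size, and register offsets below are hypothetical.
 *
 *	void __iomem *regs = ioremap(bar_phys, 0x1000);
 *	if (regs) {
 *		u32 id = readl(regs + 0x00);	// hypothetical ID register
 *		writel(0x1, regs + 0x04);	// hypothetical enable bit
 *		iounmap(regs);
 *	}
 */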

#define mmiowb()

/* Conversion between virtual and physical mappings.  */
#define mm_ptov(addr)		((void *)phys_to_virt(addr))
#define mm_vtop(addr)		((unsigned long)virt_to_phys(addr))

#if CHIP_HAS_MMIO()

/*
 * We use inline assembly to guarantee that the compiler does not
 * split an access into multiple byte-sized accesses as it might
 * sometimes do if a register data structure is marked "packed".
 * Obviously on tile we can't tolerate such an access being
 * actually unaligned, but we want to avoid the case where the
 * compiler conservatively would generate multiple accesses even
 * for an aligned read or write.
 */

static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}

static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 ret;
	asm volatile("ld2u %0, %1" : "=r" (ret) : "r" (addr));
	barrier();
	return le16_to_cpu(ret);
}

static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 ret;
	/* Sign-extend to conform to u32 ABI sign-extension convention. */
	asm volatile("ld4s %0, %1" : "=r" (ret) : "r" (addr));
	barrier();
	return le32_to_cpu(ret);
}

static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	u64 ret;
	asm volatile("ld %0, %1" : "=r" (ret) : "r" (addr));
	barrier();
	return le64_to_cpu(ret);
}

static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = val;
}

static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
	asm volatile("st2 %0, %1" :: "r" (addr), "r" (cpu_to_le16(val)));
}

static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
	asm volatile("st4 %0, %1" :: "r" (addr), "r" (cpu_to_le32(val)));
}

static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
	asm volatile("st %0, %1" :: "r" (addr), "r" (cpu_to_le64(val)));
}
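
/*
 * For illustration only (hypothetical register layout): because the
 * accessors above use explicit ld/st instructions, a readl() of a
 * naturally aligned field stays a single 4-byte access even when the
 * containing structure is marked "packed", e.g.:
 *
 *	struct example_regs {
 *		u32 ctrl;
 *		u32 status;
 *	} __packed;
 *
 *	u32 s = readl(&regs->status);	// one ld4s, never four byte loads
 */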

/*
 * The on-chip I/O hardware on tilegx is configured with VA=PA for the
 * kernel's PA range.  The low-level APIs and field names use "va" and
 * "void *" nomenclature, to be consistent with the general notion
 * that the addresses in question are virtualizable, but in the kernel
 * context we are actually manipulating PA values.  (In other contexts,
 * e.g. access from user space, we do in fact use real virtual addresses
 * in the va fields.)  To allow readers of the code to understand what's
 * happening, we direct their attention to this comment by using the
 * following two functions that just duplicate __va() and __pa().
 */
typedef unsigned long tile_io_addr_t;
static inline tile_io_addr_t va_to_tile_io_addr(void *va)
{
	BUILD_BUG_ON(sizeof(phys_addr_t) != sizeof(tile_io_addr_t));
	return __pa(va);
}
static inline void *tile_io_addr_to_va(tile_io_addr_t tile_io_addr)
{
	return __va(tile_io_addr);
}
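
/*
 * Illustrative sketch only, assuming a hypothetical on-chip engine that
 * takes buffer addresses in its "va" fields: the kernel hands it the PA
 * of a kernel-virtual buffer, then converts back when inspecting it.
 *
 *	tile_io_addr_t io_addr = va_to_tile_io_addr(buf);
 *	// ... program io_addr into the hardware descriptor ...
 *	void *same_buf = tile_io_addr_to_va(io_addr);	// == buf
 */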

#else /* CHIP_HAS_MMIO() */

#ifdef CONFIG_PCI

extern u8 _tile_readb(unsigned long addr);
extern u16 _tile_readw(unsigned long addr);
extern u32 _tile_readl(unsigned long addr);
extern u64 _tile_readq(unsigned long addr);
extern void _tile_writeb(u8  val, unsigned long addr);
extern void _tile_writew(u16 val, unsigned long addr);
extern void _tile_writel(u32 val, unsigned long addr);
extern void _tile_writeq(u64 val, unsigned long addr);

#define __raw_readb(addr) _tile_readb((unsigned long)addr)
#define __raw_readw(addr) _tile_readw((unsigned long)addr)
#define __raw_readl(addr) _tile_readl((unsigned long)addr)
#define __raw_readq(addr) _tile_readq((unsigned long)addr)
#define __raw_writeb(val, addr) _tile_writeb(val, (unsigned long)addr)
#define __raw_writew(val, addr) _tile_writew(val, (unsigned long)addr)
#define __raw_writel(val, addr) _tile_writel(val, (unsigned long)addr)
#define __raw_writeq(val, addr) _tile_writeq(val, (unsigned long)addr)

#else /* CONFIG_PCI */

/*
 * The tilepro architecture does not support IOMEM unless PCI is enabled.
 * Unfortunately we can't yet simply not declare these methods,
 * since some generic code that compiles into the kernel, but
 * we never run, uses them unconditionally.
 */

static inline int iomem_panic(void)
{
	panic("readb/writeb and friends do not exist on tile without PCI");
	return 0;
}

static inline u8 readb(unsigned long addr)
{
	return iomem_panic();
}

static inline u16 _readw(unsigned long addr)
{
	return iomem_panic();
}

static inline u32 readl(unsigned long addr)
{
	return iomem_panic();
}

static inline u64 readq(unsigned long addr)
{
	return iomem_panic();
}

static inline void writeb(u8  val, unsigned long addr)
{
	iomem_panic();
}

static inline void writew(u16 val, unsigned long addr)
{
	iomem_panic();
}

static inline void writel(u32 val, unsigned long addr)
{
	iomem_panic();
}

static inline void writeq(u64 val, unsigned long addr)
{
	iomem_panic();
}

#endif /* CONFIG_PCI */

#endif /* CHIP_HAS_MMIO() */

#define readb __raw_readb
#define readw __raw_readw
#define readl __raw_readl
#define readq __raw_readq
#define writeb __raw_writeb
#define writew __raw_writew
#define writel __raw_writel
#define writeq __raw_writeq

#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#define readq_relaxed readq
#define writeb_relaxed writeb
#define writew_relaxed writew
#define writel_relaxed writel
#define writeq_relaxed writeq

#define ioread8 readb
#define ioread16 readw
#define ioread32 readl
#define ioread64 readq
#define iowrite8 writeb
#define iowrite16 writew
#define iowrite32 writel
#define iowrite64 writeq

#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)

static inline void memset_io(volatile void *dst, int val, size_t len)
{
	size_t x;
	BUG_ON((unsigned long)dst & 0x3);
	val = (val & 0xff) * 0x01010101;
	for (x = 0; x < len; x += 4)
		writel(val, dst + x);
}

static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
				 size_t len)
{
	size_t x;
	BUG_ON((unsigned long)src & 0x3);
	for (x = 0; x < len; x += 4)
		*(u32 *)(dst + x) = readl(src + x);
}

static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
				size_t len)
{
	size_t x;
	BUG_ON((unsigned long)dst & 0x3);
	for (x = 0; x < len; x += 4)
		writel(*(u32 *)(src + x), dst + x);
}
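
/*
 * Usage sketch (all device, offset, and buffer names below are
 * hypothetical): these helpers move data in 32-bit units, so the I/O
 * address must be 4-byte aligned and lengths are effectively rounded
 * up to a multiple of 4.
 *
 *	memset_io(regs + RING_BASE, 0, RING_BYTES);
 *	memcpy_toio(regs + FW_WINDOW, fw->data, round_up(fw->size, 4));
 *	memcpy_fromio(&status, regs + STATUS_BLOCK, sizeof(status));
 */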

#endif

#if CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO)

static inline u8 inb(unsigned long addr)
{
	return readb((volatile void __iomem *) addr);
}

static inline u16 inw(unsigned long addr)
{
	return readw((volatile void __iomem *) addr);
}

static inline u32 inl(unsigned long addr)
{
	return readl((volatile void __iomem *) addr);
}

static inline void outb(u8 b, unsigned long addr)
{
	writeb(b, (volatile void __iomem *) addr);
}

static inline void outw(u16 b, unsigned long addr)
{
	writew(b, (volatile void __iomem *) addr);
}

static inline void outl(u32 b, unsigned long addr)
{
	writel(b, (volatile void __iomem *) addr);
}

static inline void insb(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u8 *buf = buffer;
		do {
			u8 x = inb(addr);
			*buf++ = x;
		} while (--count);
	}
}

static inline void insw(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u16 *buf = buffer;
		do {
			u16 x = inw(addr);
			*buf++ = x;
		} while (--count);
	}
}

static inline void insl(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u32 *buf = buffer;
		do {
			u32 x = inl(addr);
			*buf++ = x;
		} while (--count);
	}
}

static inline void outsb(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u8 *buf = buffer;
		do {
			outb(*buf++, addr);
		} while (--count);
	}
}

static inline void outsw(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u16 *buf = buffer;
		do {
			outw(*buf++, addr);
		} while (--count);
	}
}

static inline void outsl(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u32 *buf = buffer;
		do {
			outl(*buf++, addr);
		} while (--count);
	}
}

extern void __iomem *ioport_map(unsigned long port, unsigned int len);
extern void ioport_unmap(void __iomem *addr);
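
/*
 * Usage sketch (hypothetical legacy-style device; port, buf, words, and
 * the REG_*/CMD_* names are not from this header): port numbers are
 * passed directly to inb()/outb() and friends, while ioport_map()
 * yields a cookie usable with ioread*()/iowrite*().
 *
 *	outb(CMD_RESET, port + REG_CMD);
 *	insw(port + REG_DATA, buf, words);	// drain a 16-bit data FIFO
 *
 *	void __iomem *p = ioport_map(port, 8);
 *	if (p)
 *		iowrite8(CMD_START, p + REG_CMD);
 */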

#else

/*
 * The TilePro architecture does not support IOPORT, even with PCI.
 * Unfortunately we can't yet simply not declare these methods,
 * since some generic code that compiles into the kernel, but
 * we never run, uses them unconditionally.
 */

static inline long ioport_panic(void)
{
#ifdef __tilegx__
	panic("PCI IO space support is disabled. Configure the kernel with CONFIG_TILE_PCI_IO to enable it");
#else
	panic("inb/outb and friends do not exist on tile");
#endif
	return 0;
}

static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
{
	pr_info("ioport_map: mapping IO resources is unsupported on tile\n");
	return NULL;
}

static inline void ioport_unmap(void __iomem *addr)
{
	ioport_panic();
}

static inline u8 inb(unsigned long addr)
{
	return ioport_panic();
}

static inline u16 inw(unsigned long addr)
{
	return ioport_panic();
}

static inline u32 inl(unsigned long addr)
{
	return ioport_panic();
}

static inline void outb(u8 b, unsigned long addr)
{
	ioport_panic();
}

static inline void outw(u16 b, unsigned long addr)
{
	ioport_panic();
}

static inline void outl(u32 b, unsigned long addr)
{
	ioport_panic();
}

static inline void insb(unsigned long addr, void *buffer, int count)
{
	ioport_panic();
}

static inline void insw(unsigned long addr, void *buffer, int count)
{
	ioport_panic();
}

static inline void insl(unsigned long addr, void *buffer, int count)
{
	ioport_panic();
}

static inline void outsb(unsigned long addr, const void *buffer, int count)
{
	ioport_panic();
}

static inline void outsw(unsigned long addr, const void *buffer, int count)
{
	ioport_panic();
}

static inline void outsl(unsigned long addr, const void *buffer, int count)
{
	ioport_panic();
}

#endif /* CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO) */

#define inb_p(addr)	inb(addr)
#define inw_p(addr)	inw(addr)
#define inl_p(addr)	inl(addr)
#define outb_p(x, addr)	outb((x), (addr))
#define outw_p(x, addr)	outw((x), (addr))
#define outl_p(x, addr)	outl((x), (addr))

#define ioread16be(addr)	be16_to_cpu(ioread16(addr))
#define ioread32be(addr)	be32_to_cpu(ioread32(addr))
#define iowrite16be(v, addr)	iowrite16(be16_to_cpu(v), (addr))
#define iowrite32be(v, addr)	iowrite32(be32_to_cpu(v), (addr))
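
/*
 * Usage sketch (hypothetical big-endian device register; regs, REG_*,
 * and DMA_ENABLE are not from this header): the *be accessors byte-swap
 * relative to the CPU's little-endian view, so a register documented as
 * big-endian reads back in natural host order.
 *
 *	u32 ver = ioread32be(regs + REG_VERSION);
 *	iowrite32be(DMA_ENABLE, regs + REG_CTRL);
 */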

#define ioread8_rep(p, dst, count) \
	insb((unsigned long) (p), (dst), (count))
#define ioread16_rep(p, dst, count) \
	insw((unsigned long) (p), (dst), (count))
#define ioread32_rep(p, dst, count) \
	insl((unsigned long) (p), (dst), (count))

#define iowrite8_rep(p, src, count) \
	outsb((unsigned long) (p), (src), (count))
#define iowrite16_rep(p, src, count) \
	outsw((unsigned long) (p), (src), (count))
#define iowrite32_rep(p, src, count) \
	outsl((unsigned long) (p), (src), (count))

#define virt_to_bus     virt_to_phys
#define bus_to_virt     phys_to_virt

#endif /* _ASM_TILE_IO_H */