1/*
2 *  linux/arch/sparc/mm/leon_m.c
3 *
4 * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
5 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
6 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
7 *
8 * do srmmu probe in software
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/mm.h>
14#include <asm/asi.h>
15#include <asm/leon.h>
16#include <asm/tlbflush.h>
17
18#include "mm_32.h"
19
/* Nonzero (the boot default) means caches must be flushed on every context
 * switch; init_leon() overwrites it with the result of leon_flush_needed(). */
int leon_flush_during_switch = 1;
/* Set nonzero to enable printk tracing inside leon_swprobe(). */
static int srmmu_swprobe_trace;
22
23static inline unsigned long leon_get_ctable_ptr(void)
24{
25	unsigned int retval;
26
27	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
28			     "=r" (retval) :
29			     "r" (SRMMU_CTXTBL_PTR),
30			     "i" (ASI_LEON_MMUREGS));
31	return (retval & SRMMU_CTX_PMASK) << 4;
32}
33
34
/*
 * leon_swprobe - software page-table walk for the SRMMU
 * @vaddr: virtual address to translate
 * @paddr: if non-NULL, receives the computed physical address
 *
 * Walks the three-level SRMMU page tables by hand using MMU-bypass
 * physical loads (LEON_BYPASS_LOAD_PA) instead of the hardware probe
 * (see the "do srmmu probe in software" note in the file header).
 * Returns the PTE mapping @vaddr, or 0 when the context table is not
 * set up, a table pointer falls outside valid physical memory, or any
 * level of the walk holds an invalid entry.
 */
unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
{

	unsigned int ctxtbl;
	unsigned int pgd, pmd, ped;
	unsigned int ptr;
	unsigned int lvl, pte, paddrbase;
	unsigned int ctx;
	unsigned int paddr_calc;

	/* NOTE(review): paddrbase is assigned at every level below but
	 * never read afterwards. */
	paddrbase = 0;

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: trace on\n");

	/* Physical base of the context table; 0 means the MMU is not set up. */
	ctxtbl = leon_get_ctable_ptr();
	if (!(ctxtbl)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: leon_get_ctable_ptr returned 0=>0\n");
		return 0;
	}
	if (!_pfn_valid(PFN(ctxtbl))) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO
			       "swprobe: !_pfn_valid(%x)=>0\n",
			       PFN(ctxtbl));
		return 0;
	}

	/* The walk starts at the context table entry for the current context. */
	ctx = srmmu_get_context();
	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe:  --- ctx (%x) ---\n", ctx);

	/* Table entries are 4 bytes each, hence the "* 4" indexing below. */
	pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));

	/* A PTE found this early terminates the walk at level 3. */
	if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pgd is entry level 3\n");
		lvl = 3;
		pte = pgd;
		paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pgd is invalid => 0\n");
		return 0;
	}

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe:  --- pgd (%x) ---\n", pgd);

	/* A PTD holds the next table's physical address >> 4; index it with
	 * the PGD-level bits of vaddr. */
	ptr = (pgd & SRMMU_PTD_PMASK) << 4;
	ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4);
	if (!_pfn_valid(PFN(ptr)))
		return 0;

	pmd = LEON_BYPASS_LOAD_PA(ptr);
	if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pmd is entry level 2\n");
		lvl = 2;
		pte = pmd;
		paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
		return 0;
	}

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe:  --- pmd (%x) ---\n", pmd);

	/* Descend into the PMD-level table. */
	ptr = (pmd & SRMMU_PTD_PMASK) << 4;
	ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
	if (!_pfn_valid(PFN(ptr))) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
			       PFN(ptr));
		return 0;
	}

	ped = LEON_BYPASS_LOAD_PA(ptr);

	if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: ped is entry level 1\n");
		lvl = 1;
		pte = ped;
		paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: ped is invalid => 0\n");
		return 0;
	}

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe:  --- ped (%x) ---\n", ped);

	/* Final level: this entry must be a PTE, never another PTD. */
	ptr = (ped & SRMMU_PTD_PMASK) << 4;
	ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
	if (!_pfn_valid(PFN(ptr)))
		return 0;

	ptr = LEON_BYPASS_LOAD_PA(ptr);
	if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: ptr is entry level 0\n");
		lvl = 0;
		pte = ptr;
		paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: ptr is invalid => 0\n");
	return 0;

ready:
	/* Combine the in-page offset bits of vaddr (the covered region grows
	 * with the level the PTE was found at) with the physical page number
	 * from the PTE: "(pte & ~0xff) << 4" equals "(pte >> 8) << 12". */
	switch (lvl) {
	case 0:
		paddr_calc =
		    (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
		break;
	case 1:
		paddr_calc =
		    (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
		break;
	case 2:
		paddr_calc =
		    (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
		break;
	default:
	case 3:
		/* Level-3 hit: the code passes vaddr through unchanged. */
		paddr_calc = vaddr;
		break;
	}
	/* NOTE(review): "padde" looks like a typo for "paddr" in this debug
	 * string; left untouched because it is runtime output. */
	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: padde %x\n", paddr_calc);
	if (paddr)
		*paddr = paddr_calc;
	return pte;
}
181
/* Flush the entire instruction cache ("flush" acts as iflush here). */
void leon_flush_icache_all(void)
{
	__asm__ __volatile__(" flush ");	/*iflush*/
}
186
/* Flush the entire data cache via a store to the ASI_LEON_DFLUSH space. */
void leon_flush_dcache_all(void)
{
	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
			     "i"(ASI_LEON_DFLUSH) : "memory");
}
192
/*
 * Per-page cache flush.  The whole caches are flushed regardless of
 * @page: the icache only when the vma is executable, the dcache always.
 */
void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_flags & VM_EXEC)
		leon_flush_icache_all();
	leon_flush_dcache_all();
}
199
/*
 * Flush both caches: instruction cache first, then data cache.
 *
 * Reuses the single-cache helpers defined above so each flush sequence
 * is written in exactly one place; the order and effect match the
 * previous open-coded asm ("flush" + store to ASI_LEON_DFLUSH).
 */
void leon_flush_cache_all(void)
{
	leon_flush_icache_all();
	leon_flush_dcache_all();
}
206
/*
 * Flush the whole TLB.  Caches are flushed first, then a store to the
 * MMU flush ASI at address 0x400 is issued — per the SRMMU flush
 * encoding this selects a "flush entire" operation (type field in the
 * address bits; confirm against the SPARC V8 reference MMU spec).
 */
void leon_flush_tlb_all(void)
{
	leon_flush_cache_all();
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
			     "i"(ASI_LEON_MMUFLUSH) : "memory");
}
213
/*
 * leon3_getCacheRegs - read the LEON3 cache control registers
 * @regs: output structure; filled with ccr, iccr and dccr.
 *        A NULL @regs is a no-op.
 *
 * Loads offsets 0x0 (ccr), 0x8 (iccr) and 0xc (dccr) from the
 * "Cache ASI" (ASI_LEON_CACHEREGS); %g1 is used as scratch to hold
 * the non-zero offsets, hence the clobber.
 */
void leon3_getCacheRegs(struct leon3_cacheregs *regs)
{
	unsigned long ccr, iccr, dccr;

	if (!regs)
		return;
	/* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
	__asm__ __volatile__("lda [%%g0] %3, %0\n\t"
			     "mov 0x08, %%g1\n\t"
			     "lda [%%g1] %3, %1\n\t"
			     "mov 0x0c, %%g1\n\t"
			     "lda [%%g1] %3, %2\n\t"
			     : "=r"(ccr), "=r"(iccr), "=r"(dccr)
			       /* output */
			     : "i"(ASI_LEON_CACHEREGS)	/* input */
			     : "g1"	/* clobber list */
	    );
	regs->ccr = ccr;
	regs->iccr = iccr;
	regs->dccr = dccr;
}
236
237/* Due to virtual cache we need to check cache configuration if
238 * it is possible to skip flushing in some cases.
239 *
240 * Leon2 and Leon3 differ in their way of telling cache information
241 *
242 */
243int __init leon_flush_needed(void)
244{
245	int flush_needed = -1;
246	unsigned int ssize, sets;
247	char *setStr[4] =
248	    { "direct mapped", "2-way associative", "3-way associative",
249		"4-way associative"
250	};
251	/* leon 3 */
252	struct leon3_cacheregs cregs;
253	leon3_getCacheRegs(&cregs);
254	sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24;
255	/* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
256	ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20);
257
258	printk(KERN_INFO "CACHE: %s cache, set size %dk\n",
259	       sets > 3 ? "unknown" : setStr[sets], ssize);
260	if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
261		/* Set Size <= Page size  ==>
262		   flush on every context switch not needed. */
263		flush_needed = 0;
264		printk(KERN_INFO "CACHE: not flushing on every context switch\n");
265	}
266	return flush_needed;
267}
268
/*
 * Context-switch hook: flush the TLB and, unless leon_flush_needed()
 * determined it is safe to skip, the caches as well.
 */
void leon_switch_mm(void)
{
	flush_tlb_mm((void *)0);
	if (leon_flush_during_switch)
		leon_flush_cache_all();
}
275
/* cache_mm hook: LEON flushes everything, so @mm is ignored. */
static void leon_flush_cache_mm(struct mm_struct *mm)
{
	leon_flush_cache_all();
}
280
/* cache_page hook: delegates to the per-page (whole-cache) flush above. */
static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	leon_flush_pcache_all(vma, page);
}
285
/* cache_range hook: no ranged flush on LEON — flush everything. */
static void leon_flush_cache_range(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end)
{
	leon_flush_cache_all();
}
292
/* tlb_mm hook: whole-TLB flush; @mm is ignored. */
static void leon_flush_tlb_mm(struct mm_struct *mm)
{
	leon_flush_tlb_all();
}
297
/* tlb_page hook: whole-TLB flush; @vma and @page are ignored. */
static void leon_flush_tlb_page(struct vm_area_struct *vma,
				unsigned long page)
{
	leon_flush_tlb_all();
}
303
/* tlb_range hook: whole-TLB flush; the range arguments are ignored. */
static void leon_flush_tlb_range(struct vm_area_struct *vma,
				 unsigned long start,
				 unsigned long end)
{
	leon_flush_tlb_all();
}
310
/* page_to_ram hook: full cache flush; @page is ignored. */
static void leon_flush_page_to_ram(unsigned long page)
{
	leon_flush_cache_all();
}
315
/* sig_insns hook: full cache flush; @mm and @page are ignored. */
static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
{
	leon_flush_cache_all();
}
320
/* page_for_dma hook: only the data cache is flushed; @page is ignored. */
static void leon_flush_page_for_dma(unsigned long page)
{
	leon_flush_dcache_all();
}
325
/* poke_srmmu callback: nothing to poke on LEON — intentionally empty. */
void __init poke_leonsparc(void)
{
}
329
/* LEON implementations wired into the generic sparc32 cache/TLB
 * dispatch table (registered in init_leon()). */
static const struct sparc32_cachetlb_ops leon_ops = {
	.cache_all	= leon_flush_cache_all,
	.cache_mm	= leon_flush_cache_mm,
	.cache_page	= leon_flush_cache_page,
	.cache_range	= leon_flush_cache_range,
	.tlb_all	= leon_flush_tlb_all,
	.tlb_mm		= leon_flush_tlb_mm,
	.tlb_page	= leon_flush_tlb_page,
	.tlb_range	= leon_flush_tlb_range,
	.page_to_ram	= leon_flush_page_to_ram,
	.sig_insns	= leon_flush_sig_insns,
	.page_for_dma	= leon_flush_page_for_dma,
};
343
/*
 * Register the LEON handlers with the srmmu layer and decide whether a
 * cache flush is required on each context switch.
 */
void __init init_leon(void)
{
	srmmu_name = "LEON";
	sparc32_cachetlb_ops = &leon_ops;
	poke_srmmu = poke_leonsparc;

	/* 0 = skip per-switch flushes, -1 = flush every switch. */
	leon_flush_during_switch = leon_flush_needed();
}
352