/*
 * Copyright (C) 2008,2009,2010,2011 Imagination Technologies Ltd.
 *
 * Meta 2 enhanced mode MMU handling code.
 *
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/bootmem.h>
#include <linux/syscore_ops.h>

#include <asm/mmu.h>
#include <asm/mmu_context.h>

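/*
 * Read the first level (pgd) entry the hardware MMU currently holds for
 * vaddr, or return 0 if vaddr falls outside the linear window described
 * by this thread's PHYS0 register.
 */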
unsigned long mmu_read_first_level_page(unsigned long vaddr)
{
	unsigned int cpu = hard_processor_id();
	unsigned long offset, linear_base, linear_limit;
	unsigned int phys0;
	pgd_t *pgd, entry;

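	/* Strip the global space bit before computing the pgd index. */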
	if (is_global_space(vaddr))
		vaddr &= ~0x80000000;

	offset = vaddr >> PGDIR_SHIFT;

	phys0 = metag_in32(mmu_phys0_addr(cpu));

	/* Top bit of linear base is always zero. */
	linear_base = (phys0 >> PGDIR_SHIFT) & 0x1ff;

	/* Limit in the range 0 (4MB) to 9 (2GB). */
	linear_limit = 1 << ((phys0 >> 8) & 0xf);
	linear_limit += linear_base;

	/*
	 * If offset is below linear base or above the limit then no
	 * mapping exists.
	 */
	if (offset < linear_base || offset > linear_limit)
		return 0;

	offset -= linear_base;
	pgd = (pgd_t *)mmu_get_base();
	entry = pgd[offset];

	return pgd_val(entry);
}

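/*
 * Read the second level (pte) entry for vaddr using the Meta 2 cache read
 * builtin on the page-aligned address.
 */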
unsigned long mmu_read_second_level_page(unsigned long vaddr)
{
	return __builtin_meta2_cacherd((void *)(vaddr & PAGE_MASK));
}

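/*
 * Return the virtual address of this thread's top level pgd table in the
 * linear system memory region, allowing for the per-thread stride, the
 * offset held in the PHYS1 register and the global/local split.
 */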
unsigned long mmu_get_base(void)
{
	unsigned int cpu = hard_processor_id();
	unsigned long stride;

	stride = cpu * LINSYSMEMTnX_STRIDE;

	/*
	 * Bits 18:2 of the MMCU_TnLocal_TABLE_PHYS1 register should be
	 * used as an offset to the start of the top-level pgd table.
	 */
	stride += (metag_in32(mmu_phys1_addr(cpu)) & 0x7fffc);

	if (is_global_space(PAGE_OFFSET))
		stride += LINSYSMEMTXG_OFFSET;

	return LINSYSMEMT0L_BASE + stride;
}

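/*
 * Masks for the physical address bits of first and second level entries,
 * and the alignment required for each second level table.
 */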
#define FIRST_LEVEL_MASK	0xffffffc0
#define SECOND_LEVEL_MASK	0xfffff000
#define SECOND_LEVEL_ALIGN	64

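/* Called at boot from mmu_init() and again on resume from suspend. */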
static void repriv_mmu_tables(void)
{
	unsigned long phys0_addr;
	unsigned int g;

	/*
	 * Check that all the mmu table regions are priv protected, and if not
	 * fix them and emit a warning. If we left them without priv protection
	 * then userland processes would have access to a 2M window into
	 * physical memory near where the page tables are.
	 */
	phys0_addr = MMCU_T0LOCAL_TABLE_PHYS0;
	for (g = 0; g < 2; ++g) {
		unsigned int t, phys0;
		unsigned long flags;
		for (t = 0; t < 4; ++t) {
			__global_lock2(flags);
			phys0 = metag_in32(phys0_addr);
			if ((phys0 & _PAGE_PRESENT) && !(phys0 & _PAGE_PRIV)) {
				pr_warn("Fixing priv protection on T%d %s MMU table region\n",
					t,
					g ? "global" : "local");
				phys0 |= _PAGE_PRIV;
				metag_out32(phys0, phys0_addr);
			}
			__global_unlock2(flags);

			phys0_addr += MMCU_TnX_TABLE_PHYSX_STRIDE;
		}

		phys0_addr += MMCU_TXG_TABLE_PHYSX_OFFSET
			    - 4*MMCU_TnX_TABLE_PHYSX_STRIDE;
	}
}

#ifdef CONFIG_METAG_SUSPEND_MEM
static void mmu_resume(void)
{
	/*
	 * If a full suspend to RAM has happened then the original, unprotected
	 * MMU table priv settings may have been restored, so repriv the tables
	 * again.
	 */
	repriv_mmu_tables();
}
#else
#define mmu_resume NULL
#endif	/* CONFIG_METAG_SUSPEND_MEM */

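/* Resume hook so that the priv protection is reapplied after suspend. */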
static struct syscore_ops mmu_syscore_ops = {
	.resume  = mmu_resume,
};

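/*
 * Set up the kernel's view of the MMU tables: copy the existing hardware
 * mappings into swapper_pg_dir, optionally remap the kernel with 4MB
 * pages, fix up the priv protection on the table regions and register
 * the resume hook.
 */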
void __init mmu_init(unsigned long mem_end)
{
	unsigned long entry, addr;
	pgd_t *p_swapper_pg_dir;
#ifdef CONFIG_KERNEL_4M_PAGES
	unsigned long mem_size = mem_end - PAGE_OFFSET;
	unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22);
	unsigned int second_level_entry = 0;
	unsigned long *second_level_table;
#endif

	/*
	 * Copy any MMU pgd entries already in the MMU page tables over to our
	 * root init process (swapper_pg_dir) map.  This map is then inherited
	 * by all other processes, which means all processes inherit a map of
	 * the kernel space.
	 */
	addr = META_MEMORY_BASE;
	entry = pgd_index(META_MEMORY_BASE);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (entry < (PTRS_PER_PGD - pgd_index(META_MEMORY_BASE))) {
		unsigned long pgd_entry;
		/* copy over the current MMU value */
		pgd_entry = mmu_read_first_level_page(addr);
		pgd_val(*p_swapper_pg_dir) = pgd_entry;

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;
		entry++;
	}

#ifdef CONFIG_KERNEL_4M_PAGES
	/*
	 * At this point we can also map the kernel with 4MB pages to
	 * reduce TLB pressure.
	 */
	second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages);

	addr = PAGE_OFFSET;
	entry = pgd_index(PAGE_OFFSET);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (pages > 0) {
		unsigned long phys_addr, second_level_phys;
		pte_t *pte = (pte_t *)&second_level_table[second_level_entry];

		phys_addr = __pa(addr);

		second_level_phys = __pa(pte);

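		/*
		 * First level entry: physical address of the second level
		 * table, marked present with a 4MB page size.
		 */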
		pgd_val(*p_swapper_pg_dir) = ((second_level_phys &
					       FIRST_LEVEL_MASK) |
					      _PAGE_SZ_4M |
					      _PAGE_PRESENT);

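		/*
		 * Second level entry: a present, dirty, accessed, writable,
		 * cacheable kernel mapping of the corresponding physical
		 * block.
		 */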
		pte_val(*pte) = ((phys_addr & SECOND_LEVEL_MASK) |
				 _PAGE_PRESENT | _PAGE_DIRTY |
				 _PAGE_ACCESSED | _PAGE_WRITE |
				 _PAGE_CACHEABLE | _PAGE_KERNEL);

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;
		/* Second level tables must be 64 byte aligned. */
		second_level_entry += (SECOND_LEVEL_ALIGN /
				       sizeof(unsigned long));
		pages--;
	}
	load_pgd(swapper_pg_dir, hard_processor_id());
	flush_tlb_all();
#endif

	repriv_mmu_tables();
	register_syscore_ops(&mmu_syscore_ops);
}