/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * and 8260 implementations but excludes the 8xx and 4xx.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>

#include "mmu_decl.h"

struct hash_pte *Hash, *Hash_end;
unsigned long Hash_size, Hash_mask;
unsigned long _SDR1;

struct ppc_bat BATS[8][2];	/* 8 pairs of IBAT, DBAT */

struct batrange {		/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	phys_addr_t phys;
} bat_addrs[8];
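
/*
 * setbat() records each mapping it creates in bat_addrs[], so the
 * lookup helpers below can translate between virtual and physical
 * addresses in software without reading the BAT registers back.
 */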

/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
phys_addr_t v_mapped_by_bats(unsigned long va)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}
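
/*
 * For example (illustrative values only): if BAT2 maps the 16MB block
 * at PAGE_OFFSET (0xc0000000) to physical 0, then bat_addrs[2] holds
 * { .start = 0xc0000000, .limit = 0xc0ffffff, .phys = 0 } and
 * v_mapped_by_bats(0xc0001234) returns 0x1234.
 */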

/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_mapped_by_bats(phys_addr_t pa)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (pa >= bat_addrs[b].phys
		    && pa < (bat_addrs[b].limit - bat_addrs[b].start)
			    + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}

unsigned long __init mmu_mapin_ram(unsigned long top)
{
	unsigned long tot, bl, done;
	unsigned long max_size = (256<<20);

	if (__map_without_bats) {
		printk(KERN_DEBUG "RAM mapped without BATs\n");
		return 0;
	}

	/* Set up BAT2 and if necessary BAT3 to cover RAM. */

	/* Make sure we don't map a block larger than the
	   smallest alignment of the physical address. */
	tot = top;
	for (bl = 128<<10; bl < max_size; bl <<= 1) {
		if (bl * 2 > tot)
			break;
	}

	setbat(2, PAGE_OFFSET, 0, bl, PAGE_KERNEL_X);
	done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1;
	if ((done < tot) && !bat_addrs[3].limit) {
		/* use BAT3 to cover a bit more */
		tot -= done;
		for (bl = 128<<10; bl < max_size; bl <<= 1)
			if (bl * 2 > tot)
				break;
		setbat(3, PAGE_OFFSET+done, done, bl, PAGE_KERNEL_X);
		done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1;
	}

	return done;
}
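
/*
 * Worked example (illustrative values): for top = 96MB, the first loop
 * leaves bl = 64MB (the largest power-of-two block that fits), so BAT2
 * maps 64MB at PAGE_OFFSET.  The remaining 32MB is then covered by
 * BAT3, and mmu_mapin_ram() returns 96MB (done = 0x06000000).
 */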

/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
		   unsigned int size, pgprot_t prot)
{
	unsigned int bl;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];
	unsigned long flags = pgprot_val(prot);

	if ((flags & _PAGE_NO_CACHE) ||
	    (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
		flags &= ~_PAGE_COHERENT;

	bl = (size >> 17) - 1;
	if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
		/* 603, 604, etc. */
		/* Do DBAT first */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT | _PAGE_GUARDED);
		wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
		bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
		bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
		if (flags & _PAGE_USER)
			bat[1].batu |= 1;	/* Vp = 1 */
		if (flags & _PAGE_GUARDED) {
			/* G bit must be zero in IBATs */
			bat[0].batu = bat[0].batl = 0;
		} else {
			/* make IBAT same as DBAT */
			bat[0] = bat[1];
		}
	} else {
		/* 601 cpu */
		if (bl > BL_8M)
			bl = BL_8M;
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT);
		wimgxpp |= (flags & _PAGE_RW)?
			((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
		bat->batu = virt | wimgxpp | 4;	/* Ks=0, Ku=1 */
		bat->batl = phys | bl | 0x40;	/* V=1 */
	}

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}
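
/*
 * Typical use (illustrative): map the first 16MB of physical memory
 * executable at the kernel's linear mapping address:
 *
 *	setbat(2, PAGE_OFFSET, 0, 0x01000000, PAGE_KERNEL_X);
 *
 * Here bl = (0x01000000 >> 17) - 1 = 0x7f, which is the BATU BL field
 * encoding for a 16MB block.
 */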

/*
 * Preload a translation in the hash table
 */
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	pmd_t *pmd;

	if (Hash == 0)
		return;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
	if (!pmd_none(*pmd))
		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}
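
/*
 * Note that on CPUs with no hash table (Hash == 0), such as the 603,
 * this is a no-op: the software TLB miss handlers load translations
 * directly from the Linux page tables instead.
 */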

/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
	unsigned int hmask, mb, mb2;
	unsigned int n_hpteg, lg_n_hpteg;

	extern unsigned int hash_page_patch_A[];
	extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
	extern unsigned int hash_page[];
	extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		/*
		 * Put a blr (procedure return) instruction at the
		 * start of hash_page, since we can still get DSI
		 * exceptions on a 603.
		 */
		hash_page[0] = 0x4e800020;
		flush_icache_range((unsigned long) &hash_page[0],
				   (unsigned long) &hash_page[1]);
		return;
	}

	if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */

	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
	 */
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;		/* round up if not power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;
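
	/*
	 * Sizing example (illustrative): with 512MB of RAM and 4kB pages,
	 * n_hpteg = 512MB / 32kB = 16384, already a power of 2, so
	 * Hash_size = 16384 * 64 bytes = 1MB and SDR1_LOW_BITS = 0xf,
	 * the HTABMASK value the architecture expects for a 1MB table.
	 */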

	/*
	 * Find some memory for the hash table.
	 */
	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
	Hash = __va(memblock_alloc(Hash_size, Hash_size));
	memset(Hash, 0, Hash_size);
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;

	Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);

	printk("Total memory = %lldMB; using %ldkB for hash table (at %p)\n",
	       (unsigned long long)(total_memory >> 20), Hash_size >> 10, Hash);

	/*
	 * Patch up the instructions in hashtable.S:create_hpte
	 */
	if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
	Hash_mask = n_hpteg - 1;
	hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
	mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		mb2 = 16 - LG_HPTEG_SIZE;
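
	/*
	 * Continuing the 512MB example: lg_n_hpteg = 14, so
	 * mb = mb2 = 32 - 6 - 14 = 12 and hmask = 16383 >> 10 = 0xf.
	 * These values are patched into the MB (mask-begin) fields of
	 * rlwinm/rlwimi instructions (the 0x7c0 masks below) and into
	 * the 16-bit immediates that form the hash table address.
	 */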

	hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
		| ((unsigned int)(Hash) >> 16);
	hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
	hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
	hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
	hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;

	/*
	 * Ensure that the locations we've patched have been written
	 * out from the data cache and invalidated in the instruction
	 * cache, on those machines with split caches.
	 */
	flush_icache_range((unsigned long) &hash_page_patch_A[0],
			   (unsigned long) &hash_page_patch_C[1]);

	/*
	 * Patch up the instructions in hashtable.S:flush_hash_page
	 */
	flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
		| ((unsigned int)(Hash) >> 16);
	flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
	flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
	flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
	flush_icache_range((unsigned long) &flush_hash_patch_A[0],
			   (unsigned long) &flush_hash_patch_B[1]);

	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 601 can only access 16MB at the moment */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
	else /* Anything else has 256M mapped */
		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
}