/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * 	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/*
	 * check for bad kernel/user address
	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
	 */
	rldicr. r9,r3,4,(63 - PGTABLE_EADDR_SIZE - 4)
	bne-	8f

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,SID_SHIFT	/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the protoVSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */

	/* Check if hitting the linear mapping or some other kernel space */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
.globl slb_miss_kernel_load_linear
slb_miss_kernel_load_linear:
	li	r11,0
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * r9 = region id.
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
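	/*
	 * Worked example of the formula above: for the linear mapping
	 * (region 0xc) this yields context MAX_USER_CONTEXT + 1.
	 */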

BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Check virtual memmap region. To be patched at kernel boot */
	cmpldi	cr0,r9,0xf
	bne	1f
.globl slb_miss_kernel_load_vmemmap
slb_miss_kernel_load_vmemmap:
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/* vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines
	 */
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
	/* IO mapping */
.globl slb_miss_kernel_load_io
slb_miss_kernel_load_io:
	li	r11,0
6:
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * r9 = region id.
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
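	/*
	 * e.g. region 0xd (vmalloc) gets context MAX_USER_CONTEXT + 2 and
	 * region 0xf (vmemmap) gets context MAX_USER_CONTEXT + 4.
	 */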

BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

0:
	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size.
	 */
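	/*
	 * A rough C sketch of the lookup done below (psizes/index are the
	 * slice bitmap and slice index picked up from the PACA; not a
	 * drop-in implementation):
	 *
	 *	psize = (psizes >> (index * 4)) & 0xf;
	 *	sllp  = mmu_psize_defs[psize].sllp | SLB_VSID_USER;
	 */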
#ifdef CONFIG_PPC_MM_SLICES
	/* r10 has the esid */
	cmpldi	r10,16
	/* below SLICE_LOW_TOP */
	blt	5f
	/*
	 * Handle hpsizes,
	 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
	 */
	srdi    r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */
	addi	r9,r11,PACAHIGHSLICEPSIZE
	lbzx	r9,r13,r9		/* r9 is hpsizes[r11] */
	/* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
	rldicl	r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
	b	6f

5:
	/*
	 * Handle lpsizes
	 * r9 is get_paca()->context.low_slices_psize, r11 is index
	 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	mr	r11,r10
6:
	sldi	r11,r11,2  /* index * 4 */
	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
	bge	slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r9,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * 	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	/* fall through to slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
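	/* proto-VSID = context << ESID_BITS | esid; the scramble below
	 * turns that into the final 256M VSID.
	 */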
	rldimi  r10,r9,ESID_BITS,0
	ASM_VSID_SCRAMBLE(r10,r9,256M)
	/*
	 * bits above VSID_BITS_256M need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
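	/*
	 * Roughly, in C ("slb_size" here stands for the compare value
	 * below, which is patched at boot; this is a sketch, not a
	 * drop-in implementation):
	 *
	 *	rr = get_paca()->stab_rr + 1;
	 *	if (rr >= slb_size)
	 *		rr = SLB_NUM_BOLTED;
	 *	get_paca()->stab_rr = rr;
	 */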

7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
.globl slb_compare_rr_to_size
slb_compare_rr_to_size:
	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r3 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,2		/* r11 = offset * sizeof(u32) */
	srdi    r10,r10,28		/* get the 36 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u32 *)paca + offset */
	stw	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
 */
slb_finish_load_1T:
	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
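	/* proto-VSID = context << ESID_BITS_1T | 1T ESID; the scramble below
	 * turns that into the final 1T VSID.
	 */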
	rldimi  r10,r9,ESID_BITS_1T,0
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	/*
	 * bits above VSID_BITS_1T need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b