#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL
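/*
 * Worked example (editor's sketch): with the usual 4K pages (PAGE_SHIFT
 * = 12) and 32-bit PTEs, the 1024-entry PTE pages described above mean
 * PTE_SHIFT = 10, so PGDIR_SHIFT = 22: each pgd entry maps 4MB and
 * PTRS_PER_PGD = 1024 (a 4KB pgdir).  With 64-bit PTEs (CONFIG_PTE_64BIT),
 * PTE_SHIFT = 9, so PGDIR_SHIFT = 21: each pgd entry maps 2MB,
 * PTRS_PER_PTE = 512 and PTRS_PER_PGD = 2048, which is the 8KB pgdir /
 * 512-entry second level layout from the comment at the top.
 */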

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from which we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif
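/*
 * Worked example (editor's sketch; the consistent-pool size is only an
 * assumed illustration, the real value comes from Kconfig): without
 * HIGHMEM, KVIRT_TOP is 0xfe000000.  On a non-coherent platform with a
 * 2MB CONFIG_CONSISTENT_SIZE, IOREMAP_TOP = (0xfe000000 - 0x00200000)
 * & PAGE_MASK = 0xfde00000, leaving room for the consistent DMA pool
 * between IOREMAP_TOP and KVIRT_TOP, with ioremap_bot starting at
 * IOREMAP_TOP and early ioremaps moving down from there.
 */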

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a "hole" of up to 16MB
 * after the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() (which grow down
 * from ioremap_base) and the VM area allocations (which grow up from
 * VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
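/*
 * Worked example (editor's sketch, numbers are only illustrative): with
 * high_memory at 0x2ff00000 and no PPC_PIN_SIZE, VMALLOC_START =
 * (0x2ff00000 + 0x1000000) & ~0xffffff = 0x30000000, a 1MB hole; with
 * high_memory exactly at 0x30000000 the result is 0x31000000, the full
 * 16MB hole.  VMALLOC_END tracks ioremap_bot, which only moves down as
 * early ioremaps are installed.
 */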

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/pte-8xx.h>
#else /* CONFIG_6xx */
#include <asm/pte-hash32.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define	pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
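/*
 * Typical use of pte_update() (editor's sketch, mirroring the helpers
 * below): clear and/or set flag bits in one go and look at the returned
 * old value, e.g.
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);
 *	young = (old & _PAGE_ACCESSED) != 0;
 *
 * which is what __ptep_test_and_clear_young() does, plus the hash table
 * flush that hash-based MMUs need.
 */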

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}


static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

	pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
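/*
 * Worked example (editor's sketch): __swp_entry(1, 0x100) gives
 * val = 1 | (0x100 << 5) = 0x2001, and __swp_entry_to_pte() shifts that
 * left by 3, so the PTE value is 0x10008.  The swap type ends up in PTE
 * bits 3..7, the offset in bits 8 and up, and PTE bits 0..2 stay clear,
 * which is what keeps _PAGE_PRESENT and _PAGE_HASHPTE (they must sit in
 * those low bits for this encoding to be safe) out of swap PTEs.
 */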

#ifndef CONFIG_PPC_4K_PAGES
void pgtable_cache_init(void);
#else
/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)
#endif

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */