#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two-level page table layout where the pgdir is 8KB and the MS 11 bits
 * of the address index it.  The combined pgdir/pmd first level has 2048
 * entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
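/*
 * Worked example (editor's note, assuming the usual 4K PAGE_SIZE): with
 * 32-bit PTEs, PTE_SHIFT is 10, so PGDIR_SHIFT is 22, each PGD entry maps
 * 4MB and PTRS_PER_PGD is 1024 (a one-page pgdir).  With 64-bit PTEs,
 * PTE_SHIFT is 9, so PGDIR_SHIFT is 21, each PGD entry maps 2MB and
 * PTRS_PER_PGD is 2048 (the 8KB pgdir described above).
 */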

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from which we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address (IOREMAP_TOP, defined just below).
 * Early ioremaps move down from there until mem_init(), at which point
 * this becomes the top of the vmalloc and ioremap space.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be at least a 16MB "hole"
 * after the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
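
/*
 * Editor's sketch of the resulting kernel virtual layout (low to high),
 * assuming only the definitions and comments above:
 *
 *   high_memory   .. VMALLOC_START : guard hole of at least VMALLOC_OFFSET
 *   VMALLOC_START .. VMALLOC_END   : vmalloc area (VMALLOC_END == ioremap_bot)
 *   ioremap_bot   .. IOREMAP_TOP   : early ioremap()s, growing downwards
 *   IOREMAP_TOP   .. KVIRT_TOP     : consistent DMA space when
 *                                    CONFIG_NOT_COHERENT_CACHE is set
 *   KVIRT_TOP and above            : PKMAP (with HIGHMEM) and FIXMAP
 */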

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/pte-8xx.h>
#else /* CONFIG_6xx */
#include <asm/pte-hash32.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
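
/*
 * Editor's note: the clear mask above is ~_PAGE_HASHPTE, so pte_clear()
 * zeroes every PTE bit except _PAGE_HASHPTE; a stale hash-table entry
 * (if any) can therefore still be located and flushed afterwards.
 */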

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define	pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bits wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

#ifdef CONFIG_PPC_8xx
	unsigned long tmp2;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%4\n\
	andc	%1,%0,%5\n\
	or	%1,%1,%6\n\
	/* 0x200 == Extended encoding, bit 22 */ \
	/* Bit 22 has to be 1 when _PAGE_USER is unset and _PAGE_RO is set */ \
	rlwimi	%1,%1,32-1,0x200\n /* get _PAGE_RO */ \
	rlwinm	%3,%1,32-2,0x200\n /* get _PAGE_USER */ \
	andc	%1,%1,%3\n\
	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p), "=&r" (tmp2)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* CONFIG_PPC_8xx */
	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#endif /* CONFIG_PPC_8xx */
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
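
/*
 * Editor's note: callers below use pte_update() both for its side effect
 * and for the returned old value.  For instance, ptep_set_wrprotect()
 * calls pte_update(ptep, _PAGE_RW | _PAGE_HWWRITE, _PAGE_RO) to remove
 * write permission, while __ptep_test_and_clear_young() tests the
 * returned value for _PAGE_ACCESSED after clearing that bit.
 */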

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}


static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

	pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
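
/*
 * Editor's sketch of a typical lookup built from the helpers above.  The
 * pud and pmd levels are folded away by <asm-generic/pgtable-nopmd.h>, so
 * pud_offset()/pmd_offset() are the generic pass-through versions:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */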

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
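
/*
 * Worked example (editor's note): the swap type occupies PTE bits 3-7 and
 * the swap offset bits 8 and up, so PTE bits 0-2 are never used, which is
 * how the encoding keeps clear of the flags mentioned above.  For instance,
 * __swp_entry(2, 0x1234) yields .val == 0x24682 (2 | 0x1234 << 5), which
 * __swp_entry_to_pte() stores as 0x123410 (0x24682 << 3).
 */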

#ifndef CONFIG_PPC_4K_PAGES
void pgtable_cache_init(void);
#else
/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)
#endif

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */