This source file includes the following definitions.
- pte_mkwrite
- pte_mkdirty
- pte_mkyoung
- pte_wrprotect
- pte_mkexec
- pmd_clear
- pte_update
- pte_update
- __ptep_test_and_clear_young
- ptep_get_and_clear
- ptep_set_wrprotect
- __ptep_set_access_flags
- pte_young
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */
#include <asm/asm-405.h>

#ifdef CONFIG_44x
/* Set by pte_update() when an executable user mapping changes; see below. */
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/* PGDIR_SHIFT determines what a top-level page table entry can map. */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
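
/*
 * For illustration, assuming 4K pages and PTE_SHIFT == 10 (a common
 * 32-bit configuration; the real values are platform dependent):
 *
 *	PGDIR_SHIFT = 12 + 10 = 22, so each top-level entry maps
 *	PGDIR_SIZE  = 4MB, and PTRS_PER_PGD = 1 << (32 - 22) = 1024.
 */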

/* Bits to mask out from a PGD to get to the PUD page. */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

#endif /* !__ASSEMBLY__ */

#include <asm/fixmap.h>

/*
 * ioremap() allocations grow down from IOREMAP_TOP: the bottom of the
 * PKMAP window when CONFIG_HIGHMEM is set, otherwise the start of the
 * fixmap area.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares the vmalloc area with ioremap. */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END

/*
 * VMALLOC_OFFSET leaves a guard hole between the top of physical RAM
 * (high_memory) and the start of the vmalloc area, so that stray
 * accesses beyond the end of lowmem fault instead of silently hitting
 * valid mappings.  When PPC_PIN_SIZE is defined, the start is first
 * aligned to the pinned-TLB entry size.
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
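
/*
 * For illustration, with a hypothetical high_memory of 0xc8000000 and
 * no PPC_PIN_SIZE:
 *
 *	VMALLOC_START = (0xc8000000 + 0x1000000) & ~(0x1000000 - 1)
 *	              = 0xc9000000
 *
 * i.e. the vmalloc area begins at the next 16MB boundary above the
 * top of lowmem, and the guard hole is the space in between.
 */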

/*
 * Bits in a Linux-style PTE.  The exact layout is platform specific
 * and comes from the sub-arch header included below.
 */
#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE.  Most 32-bit platforms use PAGE_SHIFT
 * (i.e. a naturally aligned PFN); platforms that differ pre-define
 * PTE_RPN_SHIFT in their pte-*.h header, so don't override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#endif
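
/*
 * A minimal sketch (not part of this header) of how these macros are
 * used to recover the page frame number; example_pte_pfn() is a
 * hypothetical name for what the generic pte_pfn() accessor computes:
 *
 *	static inline unsigned long example_pte_pfn(pte_t pte)
 *	{
 *		return pte_val(pte) >> PTE_RPN_SHIFT;
 *	}
 *
 * PTE_RPN_MASK selects the same field in place, e.g. to keep the frame
 * number while replacing the permission bits.
 */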

/*
 * _PAGE_CHG_MASK masks the bits that are preserved across pgprot
 * changes: the frame number plus the dirty, accessed and special bits.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~0, 0); } while (0)

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}
#endif

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}
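
/*
 * These helpers compose, each returning a new pte_t value rather than
 * touching the page table.  A minimal sketch (illustrative only):
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte));
 *
 * Nothing is stored until something like set_pte_at() writes the
 * final value through a pte_t pointer.
 */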

/*
 * PTE updates.  This function is called whenever an existing valid
 * PTE is updated.  This does -not- include set_pte_at(), which
 * nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates and
 * the PTE may be either 32 or 64 bit wide.  In the latter case, when
 * using atomic updates, only the low part of the PTE is accessed
 * atomically.
 *
 * In addition, on 44x we also maintain a global flag indicating that
 * an executable user mapping was modified, which is needed to properly
 * flush the virtually tagged instruction cache of those
 * implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	unsigned long new = (old & ~clr) | set;

#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	p->pte = p->pte1 = p->pte2 = p->pte3 = new;
#else
	*p = __pte(new);
#endif
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
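
/*
 * Both variants return the PTE value seen before the update, which is
 * what lets callers build read-modify-write primitives on top of
 * pte_update().  For illustration, atomically clearing the referenced
 * bit while preserving everything else:
 *
 *	unsigned long old = pte_update(ptep, _PAGE_ACCESSED, 0);
 *
 * pte_clear() above is the degenerate case: clear all bits, set none.
 */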

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
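
/*
 * Typical use (sketch): page aging treats a set referenced bit as
 * recent use and clears it for the next scan:
 *
 *	if (ptep_test_and_clear_young(vma, addr, ptep))
 *		;	// page was referenced since the last scan
 *
 * The context and addr arguments are unused here; they keep the
 * signature uniform with implementations that must flush on this
 * operation.
 */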

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	/*
	 * Derive the bits to change from pte_wrprotect() itself: bits it
	 * removes from an all-ones PTE must be cleared, bits it adds to
	 * an all-zeroes PTE must be set.
	 */
	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
	unsigned long set = pte_val(pte_wrprotect(__pte(0)));

	pte_update(ptep, clr, set);
}

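/*
 * For a conventional layout where pte_wrprotect() just clears
 * _PAGE_RW, the expressions above evaluate to:
 *
 *	clr = ~pte_val(pte_wrprotect(__pte(~0))) = _PAGE_RW
 *	set =  pte_val(pte_wrprotect(__pte(0)))  = 0
 *
 * On a platform with an inverted bit (e.g. a read-only flag), the same
 * expressions yield clr = 0 and set = that flag, with no #ifdefs here.
 */
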
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	/*
	 * Only the accessed, dirty, write and exec bits may change here.
	 * Derive them from the pte_mk*() helpers so that any platform
	 * specific encoding (including inverted-polarity bits) is honoured.
	 */
	pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
	pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
	unsigned long set = pte_val(entry) & pte_val(pte_set);
	unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);

	pte_update(ptep, clr, set);

	flush_tlb_page(vma, address);
}

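/*
 * Worked example (conventional layout where the helpers only set
 * bits): for an entry requesting _PAGE_DIRTY | _PAGE_ACCESSED,
 *
 *	pte_val(pte_set) = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC
 *	set = pte_val(entry) & pte_val(pte_set) = _PAGE_DIRTY | _PAGE_ACCESSED
 *	clr = ~pte_val(entry) & ~pte_val(pte_clr) = 0
 *
 * clr only becomes non-zero on platforms whose helpers clear
 * inverted-polarity bits, making pte_clr differ from ~0.
 */
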
static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page, so pmd_page() must translate it
 * with __pa(); everywhere else the pmd contains the physical address
 * of the pte page directly.
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* To find an entry in a kernel page-table-directory. */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* To find an entry in a page-table-directory. */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
				  pte_index(addr))
#define pte_offset_map(dir, addr)	\
	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
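
/*
 * A minimal sketch (illustrative; example_lookup() is hypothetical) of
 * a full walk down to the PTE for a kernel address using the macros
 * above, going through the folded pud/pmd levels:
 *
 *	static pte_t *example_lookup(unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset_k(addr);
 *		pud_t *pud = pud_offset(pgd, addr);
 *		pmd_t *pmd = pmd_offset(pud, addr);
 *
 *		return pte_offset_kernel(pmd, addr);	// NULL if pmd is bad
 *	}
 */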

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit.
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
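
/*
 * Round trip for illustration: the type lives in the low 5 bits of the
 * entry, the offset above it, and the whole value is shifted up 3 bits
 * when stored in a PTE so the low PTE status bits stay clear:
 *
 *	swp_entry_t e = __swp_entry(2, 100);	// type 2, offset 100
 *	pte_t p = __swp_entry_to_pte(e);
 *	// __swp_type(__pte_to_swp_entry(p)) == 2
 *	// __swp_offset(__pte_to_swp_entry(p)) == 100
 */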

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */