/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
/*
 * If _PAGE_NO_EXEC is not defined, we can't enforce an execute
 * permission distinct from read, so execute is treated the same as
 * read. Also, write permissions imply read permissions. This is the
 * closest we can get by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c.
 * The real values will be generated at runtime.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
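
/*
 * For illustration only (not part of this header): the zero entries
 * above are placeholders for the protection map that the architecture
 * fills in at boot, once the cache mode in _page_cachable_default is
 * known.  A minimal sketch of such a setup routine, using hypothetical
 * entries, might look like:
 *
 *	static void setup_protection_map(void)
 *	{
 *		protection_map[0]  = PAGE_NONE;		// __P000: no access
 *		protection_map[1]  = PAGE_READONLY;	// __P001: read only
 *		protection_map[15] = PAGE_SHARED;	// __S111: rwx shared
 *	}
 *
 * The table is indexed by a VMA's VM_READ/VM_WRITE/VM_EXEC bits, with
 * the private (__P) and shared (__S) halves differing in whether writes
 * are allowed directly or must trigger copy-on-write.
 */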

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
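
/*
 * Example (illustrative, not part of this header): because MIPS caches
 * can be virtually indexed, several zero pages exist, one per cache
 * colour, and ZERO_PAGE() picks the one whose colour matches the user
 * virtual address.  A fault handler mapping a read of an untouched
 * anonymous page might do, in sketch form:
 *
 *	struct page *zp = ZERO_PAGE(fault_address);	// colour-matched
 *	// map 'zp' read-only; a later write triggers copy-on-write
 *
 * 'fault_address' stands in for the faulting user virtual address.
 */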

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)
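
/*
 * Illustration (not compiled here): on MIPS a pmd entry holds the
 * kernel virtual address of the page-table page it points to, so the
 * conversion chain for locating that page is:
 *
 *	unsigned long va = pmd_val(pmd);		// kernel virtual address
 *	phys_addr_t   pa = virt_to_phys((void *)va);	// pmd_phys()
 *	struct page  *pg = pfn_to_page(pa >> PAGE_SHIFT); // __pmd_page()
 */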

#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)
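
/*
 * Usage sketch (illustrative): htw_stop()/htw_start() nest via htw_seq,
 * so the hardware page table walker is disabled on the first stop and
 * re-enabled only when the outermost start balances it.  Callers
 * bracket page table updates the walker must not observe half-done:
 *
 *	htw_stop();
 *	// ... modify PTEs, e.g. clear an entry ...
 *	htw_start();
 *
 * Interrupts are disabled around the count update, so the sequence is
 * safe against an interrupt handler doing its own stop/start pair.
 */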

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_high & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			buddy->pte_high |= _PAGE_GLOBAL;
	}
}
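
/*
 * Note on the ordering above (explanatory, not normative): pte_high is
 * written before the smp_wmb() and pte_low after it, and pte_present()
 * tests pte_low, so another CPU that sees _PAGE_PRESENT is guaranteed
 * to also see the matching pte_high half.  A reader observing the entry
 * mid-update sees either the old PTE or a not-yet-present one, never a
 * mixed pair.
 */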

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
		null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
#ifdef CONFIG_64BIT
#define LL_INSN "lld"
#define SC_INSN "scd"
#else /* CONFIG_32BIT */
#define LL_INSN "ll"
#define SC_INSN "sc"
#endif
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__ (
			"	.set	push\n"
			"	.set	noreorder\n"
			"1:	" LL_INSN "	%[tmp], %[buddy]\n"
			"	bnez	%[tmp], 2f\n"
			"	 or	%[tmp], %[tmp], %[global]\n"
			"	" SC_INSN "	%[tmp], %[buddy]\n"
			"	beqz	%[tmp], 1b\n"
			"	 nop\n"
			"2:\n"
			"	.set	pop"
			: [buddy] "+m" (buddy->pte),
			  [tmp] "=&r" (tmp)
			: [global] "r" (page_global));
/* Don't leak the helper macros into the rest of the translation unit. */
#undef LL_INSN
#undef SC_INSN
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
#endif
}
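
/*
 * Background (illustrative): a MIPS TLB entry maps a pair of adjacent
 * virtual pages, and the hardware global (G) bit of the entry is
 * effectively the AND of the two PTEs' G bits, which is why set_pte()
 * propagates _PAGE_GLOBAL to an empty buddy.  The buddy is
 * conventionally found by flipping the low bit of the PTE's index,
 * along the lines of:
 *
 *	pte_t *buddy = (pte_t *)((unsigned long)ptep ^ sizeof(pte_t));
 *
 * (The real ptep_buddy() lives in the pgtable-32/pgtable-64 headers;
 * the expression above is a sketch of the idea, not a definition.)
 */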

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	extern void __update_cache(unsigned long address, pte_t pte);

	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}
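
/*
 * In words (explanatory): set_pte_at() calls __update_cache() only when
 * the entry is becoming present and actually changes which physical
 * frame it maps; re-setting the same pfn or tearing an entry down skips
 * the cache synchronisation.  A permissions-only update therefore
 * reduces to, in sketch form:
 *
 *	pte_t pte = *ptep;
 *	pte = pte_wrprotect(pte);		// same pfn, new permissions
 *	set_pte_at(mm, addr, ptep, pte);	// no cache sync needed
 */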

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
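
/*
 * Worked example (for illustration): __builtin_ffs() returns the
 * 1-based index of the least significant set bit, so for a power-of-two
 * size it yields log2(size) + 1.  With a 4-byte pte_t:
 *
 *	__builtin_ffs(4) == 3, so PTE_T_LOG2 == 2, i.e. 1 << 2 == 4
 *
 * These log2 values let index arithmetic use shifts instead of divides.
 */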

/*
 * We used to declare this array with a size, but gcc 3.3 and older were
 * not able to see that the size expression is a constant, so the size
 * is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED)
		pte.pte_high |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE)
		pte.pte_high |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ)
		pte.pte_high |= _PAGE_SILENT_READ;
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	else
#endif
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
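
/*
 * Rationale (explanatory): MIPS has no hardware dirty/accessed bits, so
 * the "silent" bits emulate them.  _PAGE_SILENT_WRITE corresponds to
 * the hardware dirty/writable (D) bit and is only set once the page is
 * both writable and already marked modified; _PAGE_SILENT_READ
 * corresponds to the hardware valid (V) bit and is only set once the
 * page has been accessed.  The first write to a clean writable page
 * thus faults, letting software record the modification:
 *
 *	pte = pte_mkwrite(pte);	// write allowed, but not yet dirty
 *	// first store faults; the fault handler then does:
 *	pte = pte_mkdirty(pte);	// now _PAGE_SILENT_WRITE is set too
 */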

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif

static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable".  Note that
 * "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
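
/*
 * Typical use (illustrative): a driver mapping a framebuffer would mark
 * the region write-combining before handing it to remap_pfn_range(), in
 * sketch form ('vma' being the mmap callback's VMA):
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *
 * cpu_data[0].writecombine holds the probed cache attribute; on CPUs
 * without a dedicated write-combining attribute it is expected to fall
 * back to an uncached mode.
 */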

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif
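
/*
 * Example (illustrative): pte_modify() is what lets mprotect() change
 * permissions without losing the frame number or the software
 * dirty/accessed state; _PAGE_CHG_MASK is expected to cover exactly the
 * pfn, cache attribute and software dirty/accessed bits:
 *
 *	pte_t old = *ptep;
 *	pte_t new = pte_modify(old, PAGE_READONLY);
 *	// pte_pfn(new) == pte_pfn(old); only protection bits changed
 */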

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
			   unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);

	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif
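
/*
 * Usage sketch (illustrative): a driver's mmap handler on a 32-bit CPU
 * with 64-bit physical addressing would combine the helpers above, with
 * fixup_bigphys_addr() rewriting the pfn into the full-width physical
 * alias before the mapping is created:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start,
 *				  vma->vm_pgoff, size, vma->vm_page_prot);
 */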

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;
	else
#endif
	if (pmd_val(pmd) & _PAGE_READ)
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
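
/*
 * Note (explanatory): a pmd has two representations here.  A
 * transparent-huge pmd encodes a leaf page frame (pfn << _PFN_SHIFT, as
 * in pmd_pfn() above), while a regular pmd holds the kernel virtual
 * address of a pte page (as pmd_phys()/pmd_page_vaddr() assume), so
 * pmd_page() must check pmd_trans_huge() before deciding how to decode
 * the value.
 */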

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_get_and_clear() uses a pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * Uncached accelerated TLB map for video memory access.
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area() to cope with the virtual
 * aliasing constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */