/*
 * arch/arm/include/asm/pgtable-3level.h
 *
 * Copyright (C) 2011 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef _ASM_PGTABLE_3LEVEL_H
#define _ASM_PGTABLE_3LEVEL_H

/*
 * With LPAE, there are 3 levels of page tables. Each level has 512 entries of
 * 8 bytes each, occupying a 4K page. The first level table covers a range of
 * 512GB, each entry representing 1GB. Since we are limited to 4GB input
 * address range, only 4 entries in the PGD are used.
 *
 * There are enough spare bits in a page table entry for the kernel-specific
 * state.
 */
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		512
#define PTRS_PER_PGD		4

#define PTE_HWTABLE_PTRS	(0)
#define PTE_HWTABLE_OFF		(0)
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))
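
/*
 * Unlike the classic 2-level format there is no shadow "Linux" pte table:
 * the spare descriptor bits carry the Linux state directly, so the
 * hardware table (512 entries * 8 bytes = 4K) is the only one and both
 * PTE_HWTABLE_PTRS and PTE_HWTABLE_OFF are zero.
 */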

/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map.
 */
#define PGDIR_SHIFT		30

/*
 * PMD_SHIFT determines the size a middle-level page table entry can map.
 */
#define PMD_SHIFT		21

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~((1 << PMD_SHIFT) - 1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~((1 << PGDIR_SHIFT) - 1))
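
/*
 * Each pmd entry therefore maps PMD_SIZE = 2MB and each pgd entry
 * PGDIR_SIZE = 1GB. Note that PMD_MASK and PGDIR_MASK are computed with
 * 32-bit int arithmetic: the negative value sign-extends when ANDed with
 * a 64-bit descriptor (as in pmd_pfn() below), so only the low-order
 * bits are cleared.
 */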

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		21
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~((1 << SECTION_SHIFT) - 1))

#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)

/*
 * Hugetlb definitions.
 */
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
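
/*
 * i.e. a huge page is a single 2MB pmd section: with 4K base pages,
 * HUGETLB_PAGE_ORDER = 21 - 12 = 9, or 512 base pages per huge page.
 */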

/*
 * "Linux" PTE definitions for LPAE.
 *
 * These bits overlap with the hardware bits but the naming is preserved for
 * consistency with the classic page table format.
 */
#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
#define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
#define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */
#define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */
#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 58)	/* READ ONLY */
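
/*
 * L_PTE_PRESENT covers bits [1:0] (valid + type), so a present pte is
 * always valid too. Clearing just bit 0 yields a faulting entry while the
 * software bits are preserved; this is how PROT_NONE mappings are built
 * (see the L_PMD_SECT_NONE handling in set_pmd_at() below).
 */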

#define L_PMD_SECT_VALID	(_AT(pmdval_t, 1) << 0)
#define L_PMD_SECT_DIRTY	(_AT(pmdval_t, 1) << 55)
#define L_PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 56)
#define L_PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57)
#define L_PMD_SECT_RDONLY	(_AT(pmdval_t, 1) << 58)

/*
 * To be used in assembly code with the upper page attributes.
 */
#define L_PTE_XN_HIGH		(1 << (54 - 32))
#define L_PTE_DIRTY_HIGH	(1 << (55 - 32))

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0) << 2)	/* strongly ordered */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 2) << 2)	/* normal inner write-through */
#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 7) << 2)	/* normal inner write-alloc */
#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_MASK		(_AT(pteval_t, 7) << 2)
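
/*
 * L_PTE_MT_DEV_SHARED and L_PTE_MT_DEV_NONSHARED deliberately share
 * AttrIndx 4: LPAE has a single Device memory type, shareability being
 * expressed separately through the SH[1:0] field.
 */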

/*
 * Software PGD flags.
 */
#define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */

/*
 * 2nd stage PTE definitions for LPAE.
 */
#define L_PTE_S2_MT_UNCACHED		(_AT(pteval_t, 0x0) << 2) /* strongly ordered */
#define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
#define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
#define L_PTE_S2_MT_DEV_SHARED		(_AT(pteval_t, 0x1) << 2) /* device */
#define L_PTE_S2_MT_MASK		(_AT(pteval_t, 0xf) << 2)
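
/*
 * Stage-2 descriptors encode the memory type directly in a 4-bit
 * MemAttr[3:0] field (hence the 0xf mask) rather than indexing into the
 * MAIR* registers.
 */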

#define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1]   */
#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */

#define L_PMD_S2_RDONLY			(_AT(pmdval_t, 1) << 6)   /* HAP[1]   */
#define L_PMD_S2_RDWR			(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
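
/*
 * HAP[1] (bit 6) grants stage-2 read access and HAP[2] (bit 7) grants
 * write access, so 0b01 is read-only and 0b11 is read/write.
 */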

/*
 * Hyp-mode PL2 PTE definitions for LPAE.
 */
#define L_PTE_HYP		L_PTE_USER

#ifndef __ASSEMBLY__

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))
#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
						 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
						 PMD_TYPE_SECT)
#define pmd_large(pmd)		pmd_sect(pmd)
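
/*
 * Linux only puts table descriptors (bit 1 set) in the first level; 1GB
 * block mappings are not used, so anything else there is "bad". The
 * second level holds both tables and 2MB sections, hence the explicit
 * pmd_table()/pmd_sect() type checks.
 */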

#define pud_clear(pudp)			\
	do {				\
		*pudp = __pud(0);	\
		clean_pmd_entry(pudp);	\
	} while (0)

#define set_pud(pudp, pud)		\
	do {				\
		*pudp = pud;		\
		flush_pmd_entry(pudp);	\
	} while (0)

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}
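
/*
 * The (s32) cast above is deliberate: PAGE_MASK is a 32-bit quantity, and
 * reading it as a signed value sign-extends the mask to 64 bits, so
 * physical address bits above bit 31 survive the AND with the 64-bit
 * descriptor; PHYS_MASK then trims the result to the supported physical
 * address range.
 */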

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		*pmdpd = *pmdps;	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		*pmdp = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

/*
 * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
 * that are written to a page table but not for ptes created with mk_pte.
 *
 * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
 * hugetlb_cow, where it is compared with an entry in a page table. This
 * comparison erroneously fails, ultimately leading to a memory leak.
 *
 * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
 * present before running the comparison.
 */
#define __HAVE_ARCH_PTE_SAME
#define pte_same(pte_a,pte_b)	((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG	\
					: pte_val(pte_a))				\
				== (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG	\
					: pte_val(pte_b)))

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))

#define pte_huge(pte)		(pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

#define pmd_isset(pmd, val)	((u32)(val) == (val) ? pmd_val(pmd) & (val) \
						: !!(pmd_val(pmd) & (val)))
#define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))
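
/*
 * pmd_isset() has to cope with flags living above bit 31: a flag that
 * fits in 32 bits is returned as the masked value itself (safe to
 * truncate), while a higher flag is collapsed to 0/1 with !! so nothing
 * is lost when the result is used as a 32-bit boolean.
 */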

#define pmd_young(pmd)		(pmd_isset((pmd), PMD_SECT_AF))
#define pte_special(pte)	(pte_isset((pte), L_PTE_SPECIAL))
static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= L_PTE_SPECIAL;
	return pte;
}
#define __HAVE_ARCH_PTE_SPECIAL

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		(pmd_isclear((pmd), L_PMD_SECT_RDONLY))
#define pmd_dirty(pmd)		(pmd_isset((pmd), L_PMD_SECT_DIRTY))
#define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
#define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))

#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !pmd_table(pmd))
#define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp);
#endif
#endif

#define PMD_BIT_FUNC(fn,op) \
static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }

PMD_BIT_FUNC(wrprotect,	|= L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
PMD_BIT_FUNC(mksplitting, |= L_PMD_SECT_SPLITTING);
PMD_BIT_FUNC(mkwrite,   &= ~L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkdirty,   |= L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
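
/*
 * For example, PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY) expands to:
 *
 *	static inline pmd_t pmd_mkdirty(pmd_t pmd)
 *	{
 *		pmd_val(pmd) |= L_PMD_SECT_DIRTY;
 *		return pmd;
 *	}
 */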

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

/* Represent a not-present pmd by zero; this is used by pmdp_invalidate. */
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return __pmd(0);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
				L_PMD_SECT_VALID | L_PMD_SECT_NONE;
	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
	return pmd;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	BUG_ON(addr >= TASK_SIZE);

	/* create a faulting entry if PROT_NONE protected */
	if (pmd_val(pmd) & L_PMD_SECT_NONE)
		pmd_val(pmd) &= ~L_PMD_SECT_VALID;

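	/*
	 * LPAE has no hardware dirty bit, so dirtying is emulated: keep
	 * clean or read-only entries hardware read-only (AP2 set) so the
	 * first write faults and the handler can mark the pmd dirty;
	 * entries that are both writable and dirty get real write
	 * permission (AP2 clear).
	 */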
	if (pmd_write(pmd) && pmd_dirty(pmd))
		pmd_val(pmd) &= ~PMD_SECT_AP2;
	else
		pmd_val(pmd) |= PMD_SECT_AP2;

	*pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
	flush_pmd_entry(pmdp);
}

static inline int has_transparent_hugepage(void)
{
	return 1;
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_PGTABLE_3LEVEL_H */