/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)
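/*
 * Worked example (derived from the definitions above): with a 4K granule
 * and a three-level table, ARM_LPAE_START_LVL() = 4 - 3 = 1, so the walk
 * covers levels 1, 2 and 3 and the architectural level-0 step is skipped.
 */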

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
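/*
 * For example, a 4K granule (pg_shift = 12, bits_per_level = 9) with three
 * levels yields shifts of 30, 21 and 12 for levels 1, 2 and 3 respectively.
 */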

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
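/*
 * With a 4K granule this gives the familiar LPAE mapping sizes: 1G at
 * level 1, 2M at level 2 and 4K at level 3 (iopte entries are 8 bytes,
 * so ilog2(sizeof(arm_lpae_iopte)) = 3).
 */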

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~((1ULL << (d)->pg_shift) - 1)))
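/*
 * iopte_deref() masks out the attribute bits of a table entry and converts
 * the next-level table's physical address into a kernel virtual address.
 */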

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep);

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (iopte_leaf(*ptep, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	*ptep = pte;
	data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
	return 0;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	void *cookie = data->iop.cookie;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = alloc_pages_exact(1UL << data->pg_shift,
					 GFP_ATOMIC | __GFP_ZERO);
		if (!cptep)
			return -ENOMEM;

		data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
						 cookie);
		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		*ptep = pte;
		data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = 1UL << data->pg_shift;

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	free_pages_exact(start, table_size);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

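/*
 * Split a block mapping in order to unmap part of it: remap every chunk of
 * the old block except the range being unmapped into a new next-level
 * table, then install that table in place of the block entry.
 */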
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	void *cookie = data->iop.cookie;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	*ptep = table;
	tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	iova &= ~(blk_size - 1);
	tlb->tlb_add_flush(iova, blk_size, true, cookie);
	return size;
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		*ptep = 0;
		tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, false, cookie);
			tlb->tlb_sync(data->iop.cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= ((1 << data->pg_shift) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
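	/*
	 * e.g. a 4K granule with a 32-bit IAS gives va_bits = 20, three
	 * levels, pgd_bits = 2 and hence a pgd of only 32 bytes.
	 */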

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
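	/* SL0 is an inverted encoding of the level at which the walk starts */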
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
	.flush_pgtable	= dummy_flush_pgtable,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif