/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

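/*
 * Overview: the SMMU gives each address space (ASID) a 32-bit I/O
 * virtual address range, translated through a two-level page table: a
 * 1024-entry page directory whose entries point at 1024-entry page
 * tables, each PTE mapping one 4 KiB page. struct tegra_smmu holds the
 * per-instance state shared by all address spaces.
 */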
struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned int id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define  SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x014
#define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x018
#define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define  SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define  SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define  SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define  SMMU_TLB_FLUSH_ASID(x)          (((x) & 0x7f) << 24)
#define  SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define  SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define  SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)
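
/*
 * Note: judging by the masks above, a "section" flush targets a 4 MiB
 * aligned region (one page directory entry's worth of address space),
 * while a "group" flush targets a 16 KiB aligned region.
 */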

#define SMMU_PTC_FLUSH 0x034
#define  SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define  SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define  SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR		(SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
				 SMMU_PTE_NONSECURE)

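/*
 * An IOVA splits into bits [31:22] (page directory index), [21:12]
 * (page table index) and [11:0] (page offset). For example, IOVA
 * 0x12345678 decomposes into PD index 0x048, PT index 0x345 and
 * offset 0x678.
 */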
static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	/*
	 * Mask off the attribute bits and widen before shifting so that
	 * page tables located above 4 GiB are addressed correctly.
	 */
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

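/*
 * Flush a single page table cache entry. The address is aligned down
 * to the memory controller's atom size, which is what the PTC appears
 * to operate on; on SoCs with more than 32 address bits, the high bits
 * must be written to SMMU_PTC_FLUSH_HI before the low word triggers
 * the flush.
 */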
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

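/*
 * Read back SMMU_CONFIG to ensure that all preceding register writes
 * have been posted to the hardware before execution continues.
 */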
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned int type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

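/*
 * Bind an address space to the SMMU on first use: map the page
 * directory for device access, allocate an ASID and program the
 * ASID-to-page-directory translation through the SMMU_PTB_ASID /
 * SMMU_PTB_DATA register pair. Later attachments only increment the
 * reference count.
 */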
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, SMMU_PTB_ASID_VALUE(as->id), SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int found = 0;
	unsigned int index = 0;
	int err;

	/*
	 * Note that the index must advance on every iteration, including
	 * for entries that reference other IOMMUs, since retrying the
	 * same index would otherwise loop forever.
	 */
	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
					   index++, &args)) {
		unsigned int swgroup = args.args[0];
		bool match = args.np == smmu->dev->of_node;

		of_node_put(args.np);

		if (!match)
			continue;

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		found++;
	}

	if (found == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
					   index++, &args)) {
		unsigned int swgroup = args.args[0];
		bool match = args.np == smmu->dev->of_node;

		of_node_put(args.np);

		if (!match)
			continue;

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}
}

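/*
 * Updating a page directory entry follows a strict sequence: write the
 * entry in memory, sync that memory for device access, flush the stale
 * copy from the page table cache and the TLB, and finally read back a
 * register so the flushes are known to have reached the hardware.
 */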
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(as->smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

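/*
 * Return a pointer to the PTE for the given IOVA, allocating the
 * intermediate page table on demand. Page tables come from GFP_DMA so
 * that their bus addresses fit the hardware's PDE field; the
 * smmu_dma_addr_valid() check guards against addresses the SMMU cannot
 * express.
 */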
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		struct page *page;
		dma_addr_t dma;

		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
		if (!page)
			return NULL;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = offset_in_page(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 sizeof(*pte), DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

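/*
 * Map a single page. Only 4 KiB pages are supported (see
 * pgsize_bitmap below), so the IOMMU core splits larger requests into
 * page-sized calls to this function.
 */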
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = as_get_pte(as, iova, &pte_dma);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

	return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

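/*
 * A master references its SMMU with an "iommus" property whose single
 * specifier cell names the client's SWGROUP, along the lines of this
 * illustrative (not verbatim) device tree snippet:
 *
 *   dc@54200000 {
 *           ...
 *           iommus = <&mc TEGRA_SWGROUP_DC>;
 *   };
 */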
static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;
			break;
		}

		index++;
	}

	return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,

	.pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_puts(s, "swgroup    enabled  ASID\n");
	seq_puts(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}

static const struct file_operations tegra_smmu_swgroups_fops = {
	.open = tegra_smmu_swgroups_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_puts(s, "client       enabled\n");
	seq_puts(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_clients_show, inode->i_private);
}

static const struct file_operations tegra_smmu_clients_fops = {
	.open = tegra_smmu_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

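/*
 * Note that this is not a platform driver probe: the SMMU shares its
 * register space with the memory controller, so the MC driver calls
 * this directly and hands in its device, SoC data and registers.
 */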
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0)
		return ERR_PTR(err);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}