This source file includes the following definitions:
- to_imgu_mmu
- imgu_mmu_tlb_invalidate
- call_if_imgu_is_powered
- imgu_mmu_set_halt
- imgu_mmu_alloc_page_table
- imgu_mmu_free_page_table
- address_to_pte_idx
- imgu_mmu_get_l2pt
- __imgu_mmu_map
- imgu_mmu_map
- imgu_mmu_map_sg
- __imgu_mmu_unmap
- imgu_mmu_unmap
- imgu_mmu_init
- imgu_mmu_exit
- imgu_mmu_suspend
- imgu_mmu_resume
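
A minimal usage sketch of how these entry points compose; it is not part of the
file, and the caller-side names (parent_dev, mmio_base, iova, paddr, ret) are
hypothetical:

    struct imgu_mmu_info *info;

    info = imgu_mmu_init(parent_dev, mmio_base);
    if (IS_ERR(info))
        return PTR_ERR(info);

    ret = imgu_mmu_map(info, iova, paddr, IPU3_PAGE_SIZE);
    if (!ret)
        imgu_mmu_unmap(info, iova, IPU3_PAGE_SIZE);

    imgu_mmu_exit(info);
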
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/set_memory.h>

#include "ipu3-mmu.h"

#define IPU3_PT_BITS           10
#define IPU3_PT_PTES           (1UL << IPU3_PT_BITS)
#define IPU3_PT_SIZE           (IPU3_PT_PTES << 2)
#define IPU3_PT_ORDER          (IPU3_PT_SIZE >> PAGE_SHIFT)

#define IPU3_ADDR2PTE(addr)    ((addr) >> IPU3_PAGE_SHIFT)
#define IPU3_PTE2ADDR(pte)     ((phys_addr_t)(pte) << IPU3_PAGE_SHIFT)

#define IPU3_L2PT_SHIFT        IPU3_PT_BITS
#define IPU3_L2PT_MASK         ((1UL << IPU3_L2PT_SHIFT) - 1)

#define IPU3_L1PT_SHIFT        IPU3_PT_BITS
#define IPU3_L1PT_MASK         ((1UL << IPU3_L1PT_SHIFT) - 1)

#define IPU3_MMU_ADDRESS_BITS  (IPU3_PAGE_SHIFT + \
                                IPU3_L2PT_SHIFT + \
                                IPU3_L1PT_SHIFT)

#define IMGU_REG_BASE          0x4000
#define REG_TLB_INVALIDATE     (IMGU_REG_BASE + 0x300)
#define TLB_INVALIDATE         1
#define REG_L1_PHYS            (IMGU_REG_BASE + 0x304)
#define REG_GP_HALT            (IMGU_REG_BASE + 0x5dc)
#define REG_GP_HALTED          (IMGU_REG_BASE + 0x5e0)

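/**
 * struct imgu_mmu - driver-private IPU3 MMU state
 * @dev: device this MMU serves
 * @base: MMIO base address used for MMU register access
 * @lock: serializes access to @l1pt and @l2pts
 * @dummy_page: backing page that all unmapped IOVAs point at
 * @dummy_page_pteval: PTE value referring to @dummy_page
 * @dummy_l2pt: L2 page table whose entries all point at @dummy_page
 * @dummy_l2pt_pteval: PTE value referring to @dummy_l2pt
 * @l2pts: CPU-side pointers to the allocated L2 page tables
 * @l1pt: L1 page table, one entry per L2 page table
 * @geometry: IOVA aperture exposed to users of this MMU
 */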
struct imgu_mmu {
        struct device *dev;
        void __iomem *base;

        spinlock_t lock;

        void *dummy_page;
        u32 dummy_page_pteval;

        u32 *dummy_l2pt;
        u32 dummy_l2pt_pteval;

        u32 **l2pts;
        u32 *l1pt;

        struct imgu_mmu_info geometry;
};

static inline struct imgu_mmu *to_imgu_mmu(struct imgu_mmu_info *info)
{
        return container_of(info, struct imgu_mmu, geometry);
}

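/*
 * Invalidate the IMGU TLB. Must be called after the page tables have been
 * modified so that stale translations are not used; the IMGU has to be
 * powered for the register write, see call_if_imgu_is_powered().
 */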
static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
{
        writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
}

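/*
 * Call @func only if the IMGU is currently powered, taking a runtime PM
 * reference around the call so it stays powered for the duration.
 */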
static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
                                    void (*func)(struct imgu_mmu *mmu))
{
        if (!pm_runtime_get_if_in_use(mmu->dev))
                return;

        func(mmu);
        pm_runtime_put(mmu->dev);
}

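/*
 * Set or clear the CIO gate halt: write the request to REG_GP_HALT and poll
 * REG_GP_HALTED until the hardware acknowledges it, logging an error on
 * timeout.
 */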
static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
{
        int ret;
        u32 val;

        writel(halt, mmu->base + REG_GP_HALT);
        ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
                                 val, (val & 1) == halt, 1000, 100000);

        if (ret)
                dev_err(mmu->dev, "failed to %s CIO gate halt\n",
                        halt ? "set" : "clear");
}

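/*
 * Allocate a page table page with every entry set to @pteval, and switch it
 * to uncached so that CPU writes reach memory without explicit cache flushes.
 */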
static u32 *imgu_mmu_alloc_page_table(u32 pteval)
{
        u32 *pt;
        int pte;

        pt = (u32 *)__get_free_page(GFP_KERNEL);
        if (!pt)
                return NULL;

        for (pte = 0; pte < IPU3_PT_PTES; pte++)
                pt[pte] = pteval;

        set_memory_uc((unsigned long)pt, IPU3_PT_ORDER);

        return pt;
}

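/* Switch a page table page back to cached memory and free it. */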
static void imgu_mmu_free_page_table(u32 *pt)
{
        set_memory_wb((unsigned long)pt, IPU3_PT_ORDER);
        free_page((unsigned long)pt);
}

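/*
 * Split an IOVA into its L1 and L2 page table indices. Either index pointer
 * may be NULL if the caller does not need that index.
 */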
static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
                                      u32 *l2pt_idx)
{
        iova >>= IPU3_PAGE_SHIFT;

        if (l2pt_idx)
                *l2pt_idx = iova & IPU3_L2PT_MASK;

        iova >>= IPU3_L2PT_SHIFT;

        if (l1pt_idx)
                *l1pt_idx = iova & IPU3_L1PT_MASK;
}

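/*
 * Return the L2 page table for @l1pt_idx, allocating and hooking up a new one
 * if it does not exist yet. The allocation happens with the lock dropped; if
 * another caller raced us and installed a table, the new one is freed again.
 */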
static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
{
        unsigned long flags;
        u32 *l2pt, *new_l2pt;
        u32 pteval;

        spin_lock_irqsave(&mmu->lock, flags);

        l2pt = mmu->l2pts[l1pt_idx];
        if (l2pt)
                goto done;

        spin_unlock_irqrestore(&mmu->lock, flags);

        new_l2pt = imgu_mmu_alloc_page_table(mmu->dummy_page_pteval);
        if (!new_l2pt)
                return NULL;

        spin_lock_irqsave(&mmu->lock, flags);

        dev_dbg(mmu->dev, "allocated page table %p for l1pt_idx %u\n",
                new_l2pt, l1pt_idx);

        l2pt = mmu->l2pts[l1pt_idx];
        if (l2pt) {
                imgu_mmu_free_page_table(new_l2pt);
                goto done;
        }

        l2pt = new_l2pt;
        mmu->l2pts[l1pt_idx] = new_l2pt;

        pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt));
        mmu->l1pt[l1pt_idx] = pteval;

done:
        spin_unlock_irqrestore(&mmu->lock, flags);
        return l2pt;
}

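/*
 * Map a single IPU3 page: install the PTE for @iova, failing with -EBUSY if
 * the entry is already in use.
 */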
static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
                          phys_addr_t paddr)
{
        u32 l1pt_idx, l2pt_idx;
        unsigned long flags;
        u32 *l2pt;

        if (!mmu)
                return -ENODEV;

        address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

        l2pt = imgu_mmu_get_l2pt(mmu, l1pt_idx);
        if (!l2pt)
                return -ENOMEM;

        spin_lock_irqsave(&mmu->lock, flags);

        if (l2pt[l2pt_idx] != mmu->dummy_page_pteval) {
                spin_unlock_irqrestore(&mmu->lock, flags);
                return -EBUSY;
        }

        l2pt[l2pt_idx] = IPU3_ADDR2PTE(paddr);

        spin_unlock_irqrestore(&mmu->lock, flags);

        return 0;
}

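/**
 * imgu_mmu_map - map a physically contiguous range into the IPU3 IOVA space
 * @info: MMU to map into
 * @iova: IOVA to map the range at
 * @paddr: physical address of the range
 * @size: size of the range
 *
 * All of @iova, @paddr and @size must be aligned to IPU3_PAGE_SIZE. The range
 * is mapped one page at a time and the TLB is invalidated afterwards if the
 * IMGU is powered. Returns 0 on success or a negative error code.
 */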
int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
                 phys_addr_t paddr, size_t size)
{
        struct imgu_mmu *mmu = to_imgu_mmu(info);
        int ret = 0;

        /* The hardware can only map at IPU3_PAGE_SIZE granularity. */
        if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
                dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
                        iova, &paddr, size);
                return -EINVAL;
        }

        dev_dbg(mmu->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
                iova, &paddr, size);

        while (size) {
                dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);

                ret = __imgu_mmu_map(mmu, iova, paddr);
                if (ret)
                        break;

                iova += IPU3_PAGE_SIZE;
                paddr += IPU3_PAGE_SIZE;
                size -= IPU3_PAGE_SIZE;
        }

        call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

        return ret;
}

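/**
 * imgu_mmu_map_sg - map a scatterlist into the IPU3 IOVA space
 * @info: MMU to map into
 * @iova: IOVA to start the mapping at
 * @sg: scatterlist to map
 * @nents: number of entries in @sg
 *
 * Every entry must start at an IPU3_PAGE_SIZE boundary; only the last entry
 * may have a length that is not page aligned. Returns the number of bytes
 * mapped, or 0 on failure, in which case any partial mapping is torn down.
 */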
size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
                       struct scatterlist *sg, unsigned int nents)
{
        struct imgu_mmu *mmu = to_imgu_mmu(info);
        struct scatterlist *s;
        size_t s_length, mapped = 0;
        unsigned int i;
        int ret;

        for_each_sg(sg, s, nents, i) {
                phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

                s_length = s->length;

                if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
                        goto out_err;

                /* only the last entry may be partial; round its length up */
                if (i == nents - 1 && !IS_ALIGNED(s->length, IPU3_PAGE_SIZE))
                        s_length = PAGE_ALIGN(s->length);

                ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
                if (ret)
                        goto out_err;

                mapped += s_length;
        }

        call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

        return mapped;

out_err:
        /* undo any mappings made so far */
        imgu_mmu_unmap(info, iova, mapped);

        return 0;
}

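/*
 * Unmap a single IPU3 page by pointing its PTE back at the dummy page.
 * Returns the number of bytes unmapped, or 0 if the page was not mapped.
 */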
static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
                               unsigned long iova, size_t size)
{
        u32 l1pt_idx, l2pt_idx;
        unsigned long flags;
        size_t unmap = size;
        u32 *l2pt;

        if (!mmu)
                return 0;

        address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

        spin_lock_irqsave(&mmu->lock, flags);

        l2pt = mmu->l2pts[l1pt_idx];
        if (!l2pt) {
                spin_unlock_irqrestore(&mmu->lock, flags);
                return 0;
        }

        if (l2pt[l2pt_idx] == mmu->dummy_page_pteval)
                unmap = 0;

        l2pt[l2pt_idx] = mmu->dummy_page_pteval;

        spin_unlock_irqrestore(&mmu->lock, flags);

        return unmap;
}

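/**
 * imgu_mmu_unmap - unmap a range from the IPU3 IOVA space
 * @info: MMU to unmap from
 * @iova: IOVA of the start of the range
 * @size: size of the range
 *
 * Both @iova and @size must be aligned to IPU3_PAGE_SIZE. Returns the number
 * of bytes unmapped, which may be smaller than @size if an unmapped page is
 * encountered.
 */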
size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
                      size_t size)
{
        struct imgu_mmu *mmu = to_imgu_mmu(info);
        size_t unmapped_page, unmapped = 0;

        /* The hardware can only unmap at IPU3_PAGE_SIZE granularity. */
        if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
                dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
                        iova, size);
                return -EINVAL;
        }

        dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);

        /*
         * Keep iterating until we either unmap 'size' bytes or hit an area
         * that is not mapped.
         */
        while (unmapped < size) {
                unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
                if (!unmapped_page)
                        break;

                dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n",
                        iova, unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

        return unmapped;
}

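/**
 * imgu_mmu_init - initialize the IPU3 MMU block
 * @parent: struct device that owns the MMU
 * @base: IMGU MMIO base address
 *
 * Allocates the dummy page, the dummy L2 page table and the L1 page table,
 * points every unused entry at the dummy structures, programs the L1 table
 * address into the hardware and un-halts the MMU.
 *
 * Return: a pointer to the MMU geometry, or an ERR_PTR on failure.
 */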
struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
{
        struct imgu_mmu *mmu;
        u32 pteval;

        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        mmu->dev = parent;
        mmu->base = base;
        spin_lock_init(&mmu->lock);

        /* Halt the MMU while the page tables are not yet valid. */
        imgu_mmu_set_halt(mmu, true);

        /*
         * Allocate the dummy page; every IOVA that is not explicitly mapped
         * points at it.
         */
        mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
        if (!mmu->dummy_page)
                goto fail_group;
        pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
        mmu->dummy_page_pteval = pteval;

        /*
         * Allocate the dummy L2 page table, with all entries pointing at the
         * dummy page; unused L1 entries point at this table.
         */
        mmu->dummy_l2pt = imgu_mmu_alloc_page_table(pteval);
        if (!mmu->dummy_l2pt)
                goto fail_dummy_page;
        pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
        mmu->dummy_l2pt_pteval = pteval;

        /* Allocate the array of CPU pointers to the L2 page tables. */
        mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));
        if (!mmu->l2pts)
                goto fail_l2pt;

        /* Allocate the L1 page table, initially pointing at the dummy L2 table. */
        mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
        if (!mmu->l1pt)
                goto fail_l2pts;

        pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
        writel(pteval, mmu->base + REG_L1_PHYS);
        imgu_mmu_tlb_invalidate(mmu);
        imgu_mmu_set_halt(mmu, false);

        mmu->geometry.aperture_start = 0;
        mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);

        return &mmu->geometry;

fail_l2pts:
        vfree(mmu->l2pts);
fail_l2pt:
        imgu_mmu_free_page_table(mmu->dummy_l2pt);
fail_dummy_page:
        free_page((unsigned long)mmu->dummy_page);
fail_group:
        kfree(mmu);

        return ERR_PTR(-ENOMEM);
}

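/*
 * Tear down the MMU: halt it, invalidate the TLB and free the page tables,
 * the L2 pointer array and the dummy page.
 */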
void imgu_mmu_exit(struct imgu_mmu_info *info)
{
        struct imgu_mmu *mmu = to_imgu_mmu(info);

        /* Stop the hardware before freeing the page tables it references. */
        imgu_mmu_set_halt(mmu, true);
        imgu_mmu_tlb_invalidate(mmu);

        imgu_mmu_free_page_table(mmu->l1pt);
        vfree(mmu->l2pts);
        imgu_mmu_free_page_table(mmu->dummy_l2pt);
        free_page((unsigned long)mmu->dummy_page);
        kfree(mmu);
}

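/* Halt the MMU on the way into suspend. */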
void imgu_mmu_suspend(struct imgu_mmu_info *info)
{
        struct imgu_mmu *mmu = to_imgu_mmu(info);

        imgu_mmu_set_halt(mmu, true);
}

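/* Re-program the L1 page table address and resume translation after resume. */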
void imgu_mmu_resume(struct imgu_mmu_info *info)
{
        struct imgu_mmu *mmu = to_imgu_mmu(info);
        u32 pteval;

        imgu_mmu_set_halt(mmu, true);

        pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
        writel(pteval, mmu->base + REG_L1_PHYS);

        imgu_mmu_tlb_invalidate(mmu);
        imgu_mmu_set_halt(mmu, false);
}