This source file includes the following definitions.
- __flush_icache_all
- vivt_flush_cache_mm
- vivt_flush_cache_range
- vivt_flush_cache_page
- flush_kernel_vmap_range
- invalidate_kernel_vmap_range
- flush_anon_page
- flush_cache_vmap
- flush_cache_vunmap
- __sync_cache_range_w
- __sync_cache_range_r
- check_cpu_icache_size
1
2
3
4
5
6
7 #ifndef _ASMARM_CACHEFLUSH_H
8 #define _ASMARM_CACHEFLUSH_H
9
10 #include <linux/mm.h>
11
12 #include <asm/glue-cache.h>
13 #include <asm/shmparam.h>
14 #include <asm/cachetype.h>
15 #include <asm/outercache.h>
16
17 #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
18
19
20
21
22
23 #define PG_dcache_clean PG_arch_1
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
/*
 * Table of CPU-specific cache maintenance operations.  When the kernel
 * is built for more than one CPU type (see MULTI_CACHE below) the
 * __cpuc_* and dmac_* entry points dispatch through a single instance
 * of this structure, "cpu_cache".
 */
struct cpu_cache_fns {
	/* Invalidate the whole instruction cache. */
	void (*flush_icache_all)(void);
	/* Flush the entire cache. */
	void (*flush_kern_all)(void);
	/* Flush to the Level of Unification Inner Shareable (LoUIS) --
	 * NOTE(review): inferred from the name; confirm against the
	 * per-CPU implementations. */
	void (*flush_kern_louis)(void);
	/* Flush the current task's user address space. */
	void (*flush_user_all)(void);
	/* Flush a user range: (start, end, flags); callers below pass
	 * vma->vm_flags as the third argument. */
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	/* Make a kernel/user virtual range coherent (I/D); the user
	 * variant returns an int status (see flush_cache_user_range). */
	void (*coherent_kern_range)(unsigned long, unsigned long);
	int (*coherent_user_range)(unsigned long, unsigned long);
	/* Flush a kernel D-cache area: (addr, size). */
	void (*flush_kern_dcache_area)(void *, size_t);

	/* DMA map/unmap maintenance: (addr, size, direction) --
	 * NOTE(review): the int is presumably a DMA direction value;
	 * confirm against the per-CPU implementations. */
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	/* Flush a virtual address range for DMA: (start, end) pointers. */
	void (*dma_flush_range)(const void *, const void *);
} __no_randomize_layout;
117
118
119
120
121 #ifdef MULTI_CACHE
122
123 extern struct cpu_cache_fns cpu_cache;
124
125 #define __cpuc_flush_icache_all cpu_cache.flush_icache_all
126 #define __cpuc_flush_kern_all cpu_cache.flush_kern_all
127 #define __cpuc_flush_kern_louis cpu_cache.flush_kern_louis
128 #define __cpuc_flush_user_all cpu_cache.flush_user_all
129 #define __cpuc_flush_user_range cpu_cache.flush_user_range
130 #define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
131 #define __cpuc_coherent_user_range cpu_cache.coherent_user_range
132 #define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area
133
134
135
136
137
138
139
140 #define dmac_flush_range cpu_cache.dma_flush_range
141
142 #else
143
144 extern void __cpuc_flush_icache_all(void);
145 extern void __cpuc_flush_kern_all(void);
146 extern void __cpuc_flush_kern_louis(void);
147 extern void __cpuc_flush_user_all(void);
148 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
149 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
150 extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
151 extern void __cpuc_flush_dcache_area(void *, size_t);
152
153
154
155
156
157
158
159 extern void dmac_flush_range(const void *, const void *);
160
161 #endif
162
163
164
165
166
167
168 extern void copy_to_user_page(struct vm_area_struct *, struct page *,
169 unsigned long, void *, const void *, unsigned long);
170 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
171 do { \
172 memcpy(dst, src, len); \
173 } while (0)
174
175
176
177
178
179
180 #define __flush_icache_all_generic() \
181 asm("mcr p15, 0, %0, c7, c5, 0" \
182 : : "r" (0));
183
184
185 #define __flush_icache_all_v7_smp() \
186 asm("mcr p15, 0, %0, c7, c1, 0" \
187 : : "r" (0));
188
189
190
191
192
193 #if (defined(CONFIG_CPU_V7) && \
194 (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
195 defined(CONFIG_SMP_ON_UP)
196 #define __flush_icache_preferred __cpuc_flush_icache_all
197 #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
198 #define __flush_icache_preferred __flush_icache_all_v7_smp
199 #elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
200 #define __flush_icache_preferred __cpuc_flush_icache_all
201 #else
202 #define __flush_icache_preferred __flush_icache_all_generic
203 #endif
204
/*
 * Invalidate the whole I-cache using the variant selected by the
 * __flush_icache_preferred #if ladder above, then issue dsb(ishst)
 * so the maintenance completes before subsequent stores.
 */
static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
	dsb(ishst);
}
210
211
212
213
214 #define flush_cache_louis() __cpuc_flush_kern_louis()
215
216 #define flush_cache_all() __cpuc_flush_kern_all()
217
/*
 * VIVT: flush the whole user-visible cache, but only when @mm is
 * currently resident on this CPU (per mm_cpumask).
 */
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		return;

	__cpuc_flush_user_all();
}
223
224 static inline void
225 vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
226 {
227 struct mm_struct *mm = vma->vm_mm;
228
229 if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
230 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
231 vma->vm_flags);
232 }
233
234 static inline void
235 vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
236 {
237 struct mm_struct *mm = vma->vm_mm;
238
239 if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
240 unsigned long addr = user_addr & PAGE_MASK;
241 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
242 }
243 }
244
245 #ifndef CONFIG_CPU_CACHE_VIPT
246 #define flush_cache_mm(mm) \
247 vivt_flush_cache_mm(mm)
248 #define flush_cache_range(vma,start,end) \
249 vivt_flush_cache_range(vma,start,end)
250 #define flush_cache_page(vma,addr,pfn) \
251 vivt_flush_cache_page(vma,addr,pfn)
252 #else
253 extern void flush_cache_mm(struct mm_struct *mm);
254 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
255 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
256 #endif
257
258 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
259
260
261
262
263
264
265 #define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e)
266
267
268
269
270
271 #define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)
272
273
274
275
276
277 #define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size)
278
279
280
281
282
283
284
285
286
287
288
289
290
291 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
292 extern void flush_dcache_page(struct page *);
293
294 static inline void flush_kernel_vmap_range(void *addr, int size)
295 {
296 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
297 __cpuc_flush_dcache_area(addr, (size_t)size);
298 }
299 static inline void invalidate_kernel_vmap_range(void *addr, int size)
300 {
301 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
302 __cpuc_flush_dcache_area(addr, (size_t)size);
303 }
304
305 #define ARCH_HAS_FLUSH_ANON_PAGE
/*
 * Hand anonymous pages to __flush_anon_page(); all other pages need
 * no work here.
 */
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
			struct page *, unsigned long);

	if (!PageAnon(page))
		return;

	__flush_anon_page(vma, page, vmaddr);
}
314
315 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
316 extern void flush_kernel_dcache_page(struct page *);
317
318 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
319 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
320
321 #define flush_icache_user_range(vma,page,addr,len) \
322 flush_dcache_page(page)
323
324
325
326
327
328 #define flush_icache_page(vma,page) do { } while (0)
329
330
331
332
333
334
335
336
/*
 * Called when a new kernel virtual mapping is created (vmap and
 * friends).  Aliasing cache types need a full flush; a non-aliasing
 * VIPT cache only needs a barrier.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * NOTE(review): the DSB presumably orders earlier PTE
		 * writes / cache-line cleans done by the mapping code
		 * before the new mapping is used -- confirm against
		 * vmap_pte_range()/set_pte_at().
		 */
		dsb(ishst);
}
348
/*
 * Called when a kernel virtual mapping is torn down.  Non-aliasing
 * VIPT caches need nothing; every other cache type gets a full flush.
 */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cache_is_vipt_nonaliasing())
		return;

	flush_cache_all();
}
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379 #define __CACHE_WRITEBACK_ORDER 6
380 #define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
381
382
383
384
385
386 #define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
387
388
389
390
391
/*
 * Make preceding writes by this CPU to [*p, *p + size) visible to
 * other observers: clean the inner D-cache range first, then clean
 * the corresponding physical range in the outer cache.
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}
399
400
401
402
403
404
405
/*
 * Make writes by other observers to [*p, *p + size) visible to
 * subsequent reads by this CPU.  Flush (clean+invalidate) operations
 * are used rather than plain invalidates so that dirty data written
 * simultaneously by another CPU is not discarded.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Clean the inner cache first so that any dirty data
		 * migrated into our cache from other CPUs reaches the
		 * outer cache before it is flushed:
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Flush stale data for *p out of the outer cache: */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* ... and finally flush the inner cache: */
	__cpuc_flush_dcache_area(_p, size);
}
426
427 #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
428 #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
/*
 * Disable the D-cache (clear SCTLR.C), flush the D-cache to the given
 * @level ("louis" or "all", via v7_flush_dcache_<level>), and clear
 * ACTLR bit 6 to take this CPU out of local coherency.
 * NOTE(review): presumably used on CPU power-down paths where the
 * cache state must be pushed out before coherency is dropped --
 * confirm at the call sites.  fp/ip are preserved around the call
 * since the flush routine may clobber them.
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	".arch	armv7-a \n\t" \
	"stmfd	sp!, {fp, ip} \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	"isb	\n\t" \
	"dsb	\n\t" \
	"ldmfd	sp!, {fp, ip}" \
	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
	      "r9","r10","lr","memory" )
475
476 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
477 void *kaddr, unsigned long len);
478
479
/*
 * Per-CPU I-cache sanity hook; a no-op stub when the workaround is not
 * configured.  NOTE(review): the config name suggests it detects
 * mismatched I-cache geometry between cores -- confirm against the
 * out-of-line implementation.
 */
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid);
#else
static inline void check_cpu_icache_size(int cpuid) { }
#endif
485
486 #endif