This source file includes following definitions.
- flush_icache_range
- flush_cache_mm
- flush_cache_page
- flush_cache_range
- __flush_icache_all
- flush_cache_vmap
- flush_cache_vunmap
1
2
3
4
5
6
7
8 #ifndef __ASM_CACHEFLUSH_H
9 #define __ASM_CACHEFLUSH_H
10
11 #include <linux/kgdb.h>
12 #include <linux/mm.h>
13
14
15
16
17
/*
 * Arch-private page flag: tracks whether the D-cache for a page is clean
 * (aliases the generic PG_arch_1 bit).  NOTE(review): set/tested by the
 * out-of-line cache flushing code (presumably mm/flush.c) — confirm there.
 */
#define PG_dcache_clean PG_arch_1
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
/*
 * Low-level cache maintenance primitives, implemented out of line (in
 * assembly elsewhere in the tree).  The _poc/_pop/_pou suffixes follow
 * the usual arm64 naming — Point of Coherency / Persistence / Unification
 * (NOTE(review): confirm exact semantics against the asm implementations).
 *
 * __flush_icache_range():	make a modified kernel VA range visible to
 *				instruction fetch.
 * invalidate_icache_range():	I-cache-only maintenance for the range;
 *				returns an int (error semantics defined by
 *				the implementation — confirm).
 * __flush_cache_user_range():	as __flush_icache_range() but for user
 *				addresses; returns long (may report faults).
 * sync_icache_aliases():	bring the I-cache in sync with data written
 *				through a kernel alias of the page.
 */
extern void __flush_icache_range(unsigned long start, unsigned long end);
extern int invalidate_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __inval_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pop(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
extern void sync_icache_aliases(void *kaddr, unsigned long len);
73
74 static inline void flush_icache_range(unsigned long start, unsigned long end)
75 {
76 __flush_icache_range(start, end);
77
78
79
80
81
82 #ifdef CONFIG_KGDB
83
84
85
86
87
88
89
90
91
92 if (kgdb_connected && irqs_disabled())
93 return;
94 #endif
95 kick_all_cpus_sync();
96 }
97
/*
 * flush_cache_mm - called when an entire address space goes away.
 *
 * Deliberately a no-op on this architecture: no cache maintenance is
 * required for this event (NOTE(review): presumably because the data
 * caches are physically tagged — confirm against
 * Documentation/core-api/cachetlb.rst).
 */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
101
/*
 * flush_cache_page - called before a single user page mapping is changed.
 *
 * Intentionally empty: per-page D-cache maintenance is not needed here on
 * this architecture; any required I-cache work is handled by the
 * flush_dcache_page()/copy_to_user_page() paths instead — TODO confirm.
 */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long user_addr, unsigned long pfn)
{
}
106
/*
 * flush_cache_range - called before a range of user mappings is changed.
 *
 * Intentionally empty, for the same reason as flush_cache_mm(): this
 * architecture needs no cache maintenance when virtual mappings change.
 */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
}
111
112
113
114
/*
 * Cache maintenance helpers for non-coherent streaming DMA, implemented
 * out of line.  The trailing int is the DMA transfer direction
 * (NOTE(review): presumably enum dma_data_direction — confirm at the
 * call sites / asm implementation).
 */
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_area(const void *, size_t);
118
119
120
121
122
123
/*
 * copy_to_user_page(): copy data into a page mapped in userspace and
 * perform whatever cache maintenance is needed so the user mapping sees
 * the new contents (out of line — writing may require I-cache work).
 *
 * copy_from_user_page(): reading needs no maintenance here, so it is a
 * plain memcpy().
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
	} while (0)
130
/* Duplicating an mm (fork) needs the same treatment as flush_cache_mm(). */
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
132
133
134
135
136
137
138
139
140
141
142
143
144
/*
 * This architecture provides a real (out-of-line) flush_dcache_page();
 * advertise that to the generic code via the ARCH_IMPLEMENTS_ macro.
 * NOTE(review): implementation presumably lives in the arch mm code and
 * uses PG_dcache_clean (defined above) — confirm there.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
147
148 static inline void __flush_icache_all(void)
149 {
150 if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
151 return;
152
153 asm("ic ialluis");
154 dsb(ish);
155 }
156
/* Nothing to serialize on this architecture: both are no-ops. */
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
159
160
161
162
163
/*
 * No per-page I-cache hook needed here; maintenance happens elsewhere
 * (NOTE(review): presumably via flush_dcache_page()/pte-set paths — confirm).
 */
#define flush_icache_page(vma,page) do { } while (0)
165
166
167
168
/*
 * flush_cache_vmap - called after new vmalloc/ioremap mappings are made.
 *
 * Intentionally empty: creating new kernel virtual mappings requires no
 * cache maintenance on this architecture.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
172
/*
 * flush_cache_vunmap - called before vmalloc/ioremap mappings are torn down.
 *
 * Intentionally empty, mirroring flush_cache_vmap() above.
 */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
176
/* Toggle the validity of @numpages pages of mapping starting at @addr. */
int set_memory_valid(unsigned long addr, int numpages, int enable);

/*
 * Remove / restore a page's direct-map (linear map) entry without TLB
 * maintenance, as the _noflush suffix indicates; callers flush themselves.
 */
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
181
182 #endif