This source file includes the following definitions:
- asid_versions_eq
- get_new_mmu_context
- check_mmu_context
- flush_context
- check_update_reserved_mmid
- get_new_mmid
- check_switch_mmu_context
- mmid_init
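
For orientation: check_switch_mmu_context() is the routine a context switch calls to make sure the incoming mm has a valid ASID (or MMID, on CPUs with cpu_has_mmid) on the current CPU, and mmid_init() sets up the MMID allocator at early boot. The sketch below only illustrates how a switch_mm()-style caller might invoke it with interrupts disabled; example_switch_mm() and its includes are assumptions made for this sketch, not part of context.c, and the real MIPS caller lives in the arch's asm/mmu_context.h with additional hazard and cpumask bookkeeping.

#include <linux/irqflags.h>
#include <linux/mm_types.h>
#include <asm/mmu_context.h>	/* declares check_switch_mmu_context() on MIPS */

/* Illustrative sketch only -- not the actual MIPS switch_mm() implementation. */
static inline void example_switch_mm(struct mm_struct *prev,
				     struct mm_struct *next)
{
	unsigned long flags;

	/* prev's cpumask handling is omitted in this sketch. */
	(void)prev;

	/* ASID/MMID assignment must not race with interrupts on this CPU. */
	local_irq_save(flags);

	/* Validate or allocate next's context, program EntryHi/MMID and the PGD. */
	check_switch_mmu_context(next);

	local_irq_restore(flags);
}
/* End of illustrative sketch. */
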
#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);

static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;

static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;
static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
	return ((a ^ b) & asid_version_mask(cpu)) == 0;
}

void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu;
	u64 asid;

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	cpu = smp_processor_id();
	asid = asid_cache(cpu);

	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();	/* start new asid cycle */
	}

	set_cpu_context(cpu, mm, asid);
	asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);

void check_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	/* Check if our ASID is of an older version and thus invalid */
	if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
		get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);

static void flush_context(void)
{
	u64 mmid;
	int cpu;

	/* Update the list of reserved MMIDs and the MMID bitmap */
	bitmap_clear(mmid_map, 0, num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	for_each_possible_cpu(cpu) {
		mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);

		/*
		 * If this CPU has already been through a rollover, but hasn't
		 * run another task in the meantime, we must preserve its
		 * reserved MMID, as this is the only trace we have of the
		 * process it is still running.
		 */
		if (mmid == 0)
			mmid = per_cpu(reserved_mmids, cpu);

		__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
		per_cpu(reserved_mmids, cpu) = mmid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on the next
	 * context switch.
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
	bool hit;
	int cpu;

	/*
	 * Iterate over the set of reserved MMIDs looking for a match.
	 * If we find one, then we can update our mm to use newmmid
	 * (i.e. the same MMID in the current generation), but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old MMID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved MMID in a future
	 * generation.
	 */
	hit = false;
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_mmids, cpu) == mmid) {
			hit = true;
			per_cpu(reserved_mmids, cpu) = newmmid;
		}
	}

	return hit;
}

static u64 get_new_mmid(struct mm_struct *mm)
{
	static u32 cur_idx = MMID_KERNEL_WIRED + 1;
	u64 mmid, version, mmid_mask;

	mmid = cpu_context(0, mm);
	version = atomic64_read(&mmid_version);
	mmid_mask = cpu_asid_mask(&boot_cpu_data);

	if (!asid_versions_eq(0, mmid, 0)) {
		u64 newmmid = version | (mmid & mmid_mask);

		/*
		 * If our current MMID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_mmid(mmid, newmmid)) {
			mmid = newmmid;
			goto set_context;
		}

		/*
		 * We had a valid MMID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
			mmid = newmmid;
			goto set_context;
		}
	}

	/* Allocate a free MMID */
	mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
	if (mmid != num_mmids)
		goto reserve_mmid;

	/* We're out of MMIDs, so bump the global version (generation) */
	version = atomic64_add_return_relaxed(asid_first_version(0),
					      &mmid_version);

	/* Note currently active MMIDs & mark TLBs as requiring flushes */
	flush_context();

	/* We have more MMIDs than CPUs, so this will always succeed */
	mmid = find_first_zero_bit(mmid_map, num_mmids);

reserve_mmid:
	__set_bit(mmid, mmid_map);
	cur_idx = mmid;
	mmid |= version;
set_context:
	set_cpu_context(0, mm, mmid);
	return mmid;
}

void check_switch_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	u64 ctx, old_active_mmid;
	unsigned long flags;

	if (!cpu_has_mmid) {
		check_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		goto setup_pgd;
	}

	/*
	 * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
	 * unnecessary.
	 *
	 * The memory ordering here is subtle. If our asid_cache is non-zero
	 * and the mm's MMID matches the current version, then we update this
	 * CPU's asid_cache with a relaxed cmpxchg. Racing with a concurrent
	 * rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on
	 *   cpu_mmid_lock below. Taking the lock synchronises with the
	 *   rollover and so we are forced to see the updated version.
	 *
	 * - We get a valid MMID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context() will treat us as reserved,
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	ctx = cpu_context(cpu, mm);
	old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
	if (!old_active_mmid ||
	    !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
	    !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
		raw_spin_lock_irqsave(&cpu_mmid_lock, flags);

		ctx = cpu_context(cpu, mm);
		if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
			ctx = get_new_mmid(mm);

		WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
		raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
	}

	/*
	 * Invalidate the local TLB if needed. Note that we must only clear our
	 * bit in tlb_flush_pending after this is complete, so that the
	 * cpu_has_shared_ftlb_entries case below isn't misled.
	 */
	if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();
		cpumask_clear_cpu(cpu, &tlb_flush_pending);
	}

	write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));

	/*
	 * If this CPU shares FTLB entries with its siblings and one or more of
	 * those siblings hasn't yet invalidated its TLB following a version
	 * increase, then we need to invalidate any TLB entries for our MMID
	 * that we might otherwise pick up from a sibling.
	 *
	 * This is guarded by CONFIG_SMP because cpu_sibling_map isn't defined
	 * in CONFIG_SMP=n kernels.
	 */
#ifdef CONFIG_SMP
	if (cpu_has_shared_ftlb_entries &&
	    cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
		/* Ensure the invalidation below operates on the new MMID */
		mtc0_tlbw_hazard();

		/*
		 * Invalidate all TLB entries associated with the new MMID,
		 * and wait for the invalidation to complete.
		 */
		ginvt_mmid();
		sync_ginv();
	}
#endif

setup_pgd:
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);

static int mmid_init(void)
{
	if (!cpu_has_mmid)
		return 0;

	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more MMID than CPUs.
	 */
	num_mmids = asid_first_version(0);
	WARN_ON(num_mmids <= num_possible_cpus());

	atomic64_set(&mmid_version, asid_first_version(0));
	mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map),
			   GFP_KERNEL);
	if (!mmid_map)
		panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	pr_info("MMID allocator initialised with %u entries\n", num_mmids);
	return 0;
}
early_initcall(mmid_init);