This source file includes the following definitions:
- get_cpu_asid_bits
- verify_cpu_asid_bits
- flush_context
- check_update_reserved_asid
- new_context
- check_and_switch_context
- post_ttbr_update_workaround
- asids_init

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
#else
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
#endif
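
With CONFIG_UNMAP_KERNEL_AT_EL0 (KPTI), hardware ASIDs are allocated in
even/odd pairs (the extra ASID distinguishes the kernel-visible and
kernel-unmapped page tables of the same task), which is why NUM_USER_ASIDS
is halved. A minimal userspace sketch of the asid2idx()/idx2asid() round
trip, assuming 16-bit ASIDs; all sketch_* names are local stand-ins, not
kernel API:

#include <assert.h>
#include <stdint.h>

#define SKETCH_ASID_BITS	16
#define SKETCH_ASID_MASK	(~(uint64_t)((1ULL << SKETCH_ASID_BITS) - 1))

/* KPTI variants: bitmap index <-> even hardware ASID */
static uint64_t sketch_asid2idx(uint64_t asid)
{
	return (asid & ~SKETCH_ASID_MASK) >> 1;
}

static uint64_t sketch_idx2asid(uint64_t idx)
{
	return (idx << 1) & ~SKETCH_ASID_MASK;
}

int main(void)
{
	/* Bitmap index 5 maps to ASID 10; its odd partner 11 belongs to the same task. */
	assert(sketch_idx2asid(5) == 10);
	assert(sketch_asid2idx(10) == 5);
	/* Generation bits above the ASID are masked off on the way in. */
	assert(sketch_asid2idx((3ULL << SKETCH_ASID_BITS) | 10) == 5);
	return 0;
}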

static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}
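
get_cpu_asid_bits() decodes the 4-bit ASIDBits field at bits [7:4] of
ID_AA64MMFR0_EL1: 0b0000 means 8 ASID bits, 0b0010 means 16. A standalone
model of that extraction (the register value below is invented for the
test; sketch_* names are not kernel API):

#include <assert.h>
#include <stdint.h>

#define SKETCH_ASID_SHIFT	4	/* ID_AA64MMFR0_EL1.ASIDBits */

static unsigned int sketch_extract_field(uint64_t reg, unsigned int shift)
{
	return (reg >> shift) & 0xf;	/* ID register fields are 4 bits wide */
}

int main(void)
{
	uint64_t mmfr0 = 0x20;	/* hypothetical value: ASIDBits == 2 -> 16-bit ASIDs */

	assert(sketch_extract_field(mmfr0, SKETCH_ASID_SHIFT) == 2);
	return 0;
}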

void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if
		 * this CPU supports fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size (%u) than boot CPU (%u)\n",
			smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a rollover, but
		 * hasn't run another task in the meantime, we must preserve
		 * its reserved ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch.
	 */
	cpumask_setall(&tlb_flush_pending);
}
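
The subtle case above is asid == 0: a CPU that has not scheduled since the
previous rollover already had its active_asids xchg'd to zero, so its
reserved ASID is the only record of the task it is still running. A small
userspace model of that bookkeeping (a sketch only; plain arrays stand in
for the per-CPU variables, and a byte-per-ASID array for the bitmap):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define SKETCH_NR_CPUS	2

static uint64_t active[SKETCH_NR_CPUS], reserved[SKETCH_NR_CPUS];
static unsigned char bitmap[64];

/* Model of flush_context(): re-seed the bitmap from what each CPU runs now. */
static void sketch_flush_context(void)
{
	memset(bitmap, 0, sizeof(bitmap));
	for (int i = 0; i < SKETCH_NR_CPUS; i++) {
		uint64_t asid = active[i];

		active[i] = 0;
		if (asid == 0)		/* idle since the last rollover */
			asid = reserved[i];
		bitmap[asid & 63] = 1;
		reserved[i] = asid;
	}
}

int main(void)
{
	active[0] = 5;		/* CPU0 is running ASID 5 */
	reserved[1] = 9;	/* CPU1 has been idle since the previous rollover */
	sketch_flush_context();
	assert(bitmap[5] && bitmap[9]);	/* both survive the rollover */
	assert(reserved[0] == 5 && active[0] == 0);
	return 0;
}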

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation). We can't exit
	 * the loop early, since we need to ensure that all copies of the
	 * old ASID are updated to reflect the mm; failing to do so could
	 * result in us missing the reserved ASID in a future generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}
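
A context id minted by new_context() carries the generation in its upper
bits, so staleness is a single XOR-and-shift test: any id from an older
generation differs somewhere above bit asid_bits - 1, and
check_and_switch_context() below relies on exactly this. A standalone
illustration (16-bit ASIDs assumed; names are local to the sketch):

#include <assert.h>
#include <stdint.h>

#define SKETCH_ASID_BITS	16

/* Non-zero iff the generation bits (everything above the ASID) differ. */
static int sketch_is_stale(uint64_t ctx_id, uint64_t current_gen)
{
	return (ctx_id ^ current_gen) >> SKETCH_ASID_BITS != 0;
}

int main(void)
{
	uint64_t gen2 = 2ULL << SKETCH_ASID_BITS;
	uint64_t gen3 = 3ULL << SKETCH_ASID_BITS;

	assert(!sketch_is_stale(gen3 | 0x42, gen3));	/* current generation */
	assert(sketch_is_stale(gen2 | 0x42, gen3));	/* minted before rollover */
	return 0;
}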

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   active_asids entry is current, so we can't have hit a rollover.
	 */
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}
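
For context on when this runs: the context-switch path in
<asm/mmu_context.h> funnels into check_and_switch_context(). Below is a
sketch of that caller, modeled on arm64's __switch_mm() from the same
kernel era; the exact body may differ, so treat it as illustrative only:

static inline void sketch_switch_mm(struct mm_struct *next)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * init_mm has no user mappings and the kernel half lives in TTBR1,
	 * so just point TTBR0 at the reserved (empty) tables.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next, cpu);
}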

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	/*
	 * Cavium erratum 27456: invalidate the whole I-cache and
	 * synchronise after the TTBR update. The sequence is patched in
	 * only on affected CPUs; everyone else runs the NOPs.
	 */
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/*
	 * Expect allocation after rollover to fail if we don't have at
	 * least one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pr_info("ASID allocator initialised with %lu entries\n",
		NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);
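
For scale: with 16-bit ASIDs and KPTI enabled, NUM_USER_ASIDS is
1 << 15 = 32768, so the kcalloc() above allocates a 4 KiB bitmap on an
LP64 target. A standalone check of that arithmetic (BITS_TO_LONGS is
re-derived locally for the sketch):

#include <assert.h>
#include <limits.h>

#define SKETCH_BITS_PER_LONG	(sizeof(long) * CHAR_BIT)
#define SKETCH_BITS_TO_LONGS(n)	(((n) + SKETCH_BITS_PER_LONG - 1) / SKETCH_BITS_PER_LONG)

int main(void)
{
	unsigned long num_user_asids = 1UL << 15;	/* 16-bit ASIDs, halved by KPTI */

	/* On LP64: 32768 bits -> 512 longs -> 4096 bytes. */
	assert(SKETCH_BITS_TO_LONGS(num_user_asids) * sizeof(long) == 4096);
	return 0;
}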