This source file includes the following definitions.
- v_block_mapped
- p_block_mapped
- find_free_bat
- block_size
- setibat
- clearibat
- __mmu_mapin_ram
- mmu_mapin_ram
- mmu_mark_initmem_nx
- mmu_mark_rodata_ro
- setbat
- hash_preload
- update_mmu_cache
- MMU_init_hw
- MMU_init_hw_patch
- setup_initial_memory_limit
- print_system_hash_info
- setup_kuep
- setup_kuap
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/code-patching.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

struct hash_pte *Hash;
static unsigned long Hash_size, Hash_mask;
unsigned long _SDR1;
static unsigned int hash_mb, hash_mb2;

struct ppc_bat BATS[8][2];	/* 8 pairs of IBAT, DBAT */

struct batrange {		/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	phys_addr_t phys;
} bat_addrs[8];

/*
 * Return PA for this VA if it is block mapped, or 0 if not.
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	int b;
	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}

/*
 * Return VA for a given PA, or 0 if it is not block mapped.
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	int b;
	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (pa >= bat_addrs[b].phys &&
		    pa < (bat_addrs[b].limit - bat_addrs[b].start) + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}

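/*
 * Find the index of an unused BAT pair.  As the code below shows, on the
 * 601 a pair is free when the V bit (0x40) of the lower word is clear; on
 * 603 and later it is free when the Vs/Vp valid bits (the low two bits of
 * the upper word of the data BAT) are both clear.
 */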
static int find_free_bat(void)
{
	int b;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) {
		for (b = 0; b < 4; b++) {
			struct ppc_bat *bat = BATS[b];

			if (!(bat[0].batl & 0x40))
				return b;
		}
	} else {
		int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;

		for (b = 0; b < n; b++) {
			struct ppc_bat *bat = BATS[b];

			if (!(bat[1].batu & 3))
				return b;
		}
	}
	return -1;
}

/*
 * This function calculates the size of the larger block usable to map the
 * beginning of an area.  The result is limited by the alignment of base
 * (a BAT block must be naturally aligned), by the size of the remaining
 * area (top - base), and by the maximum block size of the CPU (8M on the
 * 601, 256M otherwise).
 */
static unsigned int block_size(unsigned long base, unsigned long top)
{
	unsigned int max_size = IS_ENABLED(CONFIG_PPC_BOOK3S_601) ? SZ_8M : SZ_256M;
	unsigned int base_shift = (ffs(base) - 1) & 31;
	unsigned int block_shift = (fls(top - base) - 1) & 31;

	return min3(max_size, 1U << base_shift, 1U << block_shift);
}
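
/*
 * Worked example (hypothetical values): for base = 0x1400000 (20M) and
 * top = 0x2000000 (32M), ffs(base) - 1 = 22, so the alignment of base
 * allows at most a 4M block; fls(top - base) - 1 = 23, so the 12M
 * remaining area allows at most 8M; min3(256M, 4M, 8M) returns 4M.
 */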

/*
 * Set up one of the IBAT (instruction block address translation) register
 * pairs.  The parameters are not checked; in particular size must be a
 * power of 2 between 128k and 256M.
 */
static void setibat(int index, unsigned long virt, phys_addr_t phys,
		    unsigned int size, pgprot_t prot)
{
	unsigned int bl = (size >> 17) - 1;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];
	unsigned long flags = pgprot_val(prot);

	if (!cpu_has_feature(CPU_FTR_NEED_COHERENT))
		flags &= ~_PAGE_COHERENT;

	wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
	bat[0].batu = virt | (bl << 2) | 2;	/* Vs=1, Vp=0 */
	bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
	if (flags & _PAGE_USER)
		bat[0].batu |= 1;	/* Vp = 1 */
}

static void clearibat(int index)
{
	struct ppc_bat *bat = BATS[index];

	bat[0].batu = 0;
	bat[0].batl = 0;
}

static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top)
{
	int idx;

	while ((idx = find_free_bat()) != -1 && base != top) {
		unsigned int size = block_size(base, top);

		if (size < 128 << 10)	/* 128k is the smallest BAT block */
			break;
		setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
		base += size;
	}

	return base;
}

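/*
 * The border at __init_begin splits the BAT mapping when strict kernel
 * RWX is enabled, so that the region below it is mapped with separate
 * BATs and can later be protected without remapping the rest of RAM.
 */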
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long done;
	unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;

	if (__map_without_bats) {
		pr_debug("RAM mapped without BATs\n");
		return base;
	}

	if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
		return __mmu_mapin_ram(base, top);

	done = __mmu_mapin_ram(base, border);
	if (done != border)
		return done;

	return __mmu_mapin_ram(border, top);
}

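/*
 * Map kernel text from _stext up to _etext executable via IBATs, clear the
 * remaining IBATs, then set the no-execute bit on the address-space
 * segments above TASK_SIZE, stopping before the segment used for modules
 * when CONFIG_MODULES is enabled.
 */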
void mmu_mark_initmem_nx(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;
	unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
	unsigned long top = (unsigned long)_etext - PAGE_OFFSET;
	unsigned long size;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
		return;

	for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) {
		size = block_size(base, top);
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	if (base < top) {
		size = block_size(base, top);
		size = max(size, 128UL << 10);
		if ((top - base) > size) {
			if (strict_kernel_rwx_enabled())
				pr_warn("Kernel _etext not properly aligned\n");
			size <<= 1;
		}
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	for (; i < nb; i++)
		clearibat(i);

	update_bats();

	for (i = TASK_SIZE >> 28; i < 16; i++) {
		/* Do not set NX on VM space for modules */
		if (IS_ENABLED(CONFIG_MODULES) &&
		    (VMALLOC_START & 0xf0000000) == i << 28)
			break;
		mtsrin(mfsrin(i << 28) | 0x10000000, i << 28);	/* set the segment N (no-execute) bit */
	}
}

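/*
 * Flip the protection bits of every data BAT covering the kernel image
 * (everything mapped below __init_begin) from read-write to read-only.
 */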
void mmu_mark_rodata_ro(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
		return;

	for (i = 0; i < nb; i++) {
		struct ppc_bat *bat = BATS[i];

		if (bat_addrs[i].start < (unsigned long)__init_begin)
			bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX;
	}

	update_bats();
}

/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
		   unsigned int size, pgprot_t prot)
{
	unsigned int bl;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];
	unsigned long flags = pgprot_val(prot);

	if ((flags & _PAGE_NO_CACHE) ||
	    (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
		flags &= ~_PAGE_COHERENT;

	bl = (size >> 17) - 1;
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_601)) {
		/* 603, 604, etc. */
		/* Do the DBAT first */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT | _PAGE_GUARDED);
		wimgxpp |= (flags & _PAGE_RW) ? BPP_RW : BPP_RX;
		bat[1].batu = virt | (bl << 2) | 2;	/* Vs=1, Vp=0 */
		bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
		if (flags & _PAGE_USER)
			bat[1].batu |= 1;	/* Vp = 1 */
		if (flags & _PAGE_GUARDED) {
			/* The G bit must be zero in IBATs */
			flags &= ~_PAGE_EXEC;
		}
		if (flags & _PAGE_EXEC)
			bat[0] = bat[1];
		else
			bat[0].batu = bat[0].batl = 0;
	} else {
		/* 601 cpu */
		if (bl > BL_8M)
			bl = BL_8M;
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT);
		wimgxpp |= (flags & _PAGE_RW) ?
			((flags & _PAGE_USER) ? PP_RWRW : PP_RWXX) : PP_RXRX;
		bat->batu = virt | wimgxpp | 4;	/* Ks=0, Ku=1 */
		bat->batl = phys | bl | 0x40;	/* V=1 */
	}

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}
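
/*
 * Worked example (hypothetical call): setbat(3, 0xf0000000, 0xf0000000,
 * SZ_16M, PAGE_KERNEL) on a 603+ gives bl = (16M >> 17) - 1 = 0x7f, so
 * batu = 0xf0000000 | (0x7f << 2) | 2 = 0xf00001fe, and bat_addrs[3]
 * then records the range 0xf0000000..0xf0ffffff.
 */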

/*
 * Preload a translation in the hash table.
 */
void hash_preload(struct mm_struct *mm, unsigned long ea)
{
	pmd_t *pmd;

	if (!Hash)
		return;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
	if (!pmd_none(*pmd))
		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or the pte lock held.
	 */

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We have to test for regs NULL since init will get here first thing at boot */
	if (!current->thread.regs)
		return;

	/* We also avoid filling the hash if not coming from a fault */
	if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
		return;

	hash_preload(vma->vm_mm, address);
}

/*
 * Set up the hardware hash table and the variables that describe it.
 */
void __init MMU_init_hw(void)
{
	unsigned int n_hpteg, lg_n_hpteg;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */

	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
	 */
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;		/* round up if not a power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;
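
	/*
	 * Worked example (hypothetical): with 256MB of RAM and 4kB pages,
	 * n_hpteg = 256M / (4k * 8) = 8192, already a power of 2, so
	 * lg_n_hpteg = 13, Hash_size = 8192 << 6 = 512kB, and
	 * SDR1_LOW_BITS = (8192 - 1) >> 10 = 7.
	 */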

	/*
	 * Find some memory for the hash table.
	 */
	if (ppc_md.progress)
		ppc_md.progress("hash:find piece", 0x322);
	Hash = memblock_alloc(Hash_size, Hash_size);
	if (!Hash)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, Hash_size, Hash_size);
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;

	pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
		(unsigned long long)(total_memory >> 20), Hash_size >> 10);

	Hash_mask = n_hpteg - 1;
	hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		hash_mb2 = 16 - LG_HPTEG_SIZE;

	/*
	 * When KASAN is selected, there is already an early temporary hash
	 * table and the switch to the final hash table is done later.
	 */
	if (IS_ENABLED(CONFIG_KASAN))
		return;

	MMU_init_hw_patch();
}

void __init MMU_init_hw_patch(void)
{
	unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
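	/*
	 * Continuing the hypothetical 256MB example from MMU_init_hw():
	 * Hash_mask = 0x1fff, so hmask = 0x1fff >> 10 = 7, and
	 * hash_mb = hash_mb2 = 32 - 6 - 13 = 13 are the shift values
	 * patched into the hash page code below.
	 */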

	if (ppc_md.progress)
		ppc_md.progress("hash:patch", 0x345);
	if (ppc_md.progress)
		ppc_md.progress("hash:done", 0x205);

	/*
	 * Patch up the instructions that create hash PTEs with the hash
	 * table address, the hash mask and the shift values computed by
	 * MMU_init_hw().
	 */
	modify_instruction_site(&patch__hash_page_A0, 0xffff,
				((unsigned int)Hash - PAGE_OFFSET) >> 16);
	modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
	modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);

	/*
	 * Patch up the instructions that flush hash table entries the
	 * same way.
	 */
	modify_instruction_site(&patch__flush_hash_A0, 0xffff,
				((unsigned int)Hash - PAGE_OFFSET) >> 16);
	modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical memory.
	 */
	BUG_ON(first_memblock_base != 0);

	/* The 601 can only access 16MB at the moment */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
	else /* Anything else has 256M mapped */
		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
}

void __init print_system_hash_info(void)
{
	pr_info("Hash_size = 0x%lx\n", Hash_size);
	if (Hash_mask)
		pr_info("Hash_mask = 0x%lx\n", Hash_mask);
}

#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
	pr_info("Activating Kernel Userspace Execution Prevention\n");

	if (disabled)
		pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
}
#endif

#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
}
#endif