This source file includes the following definitions:
- pmdp_set_access_flags
- pmdp_test_and_clear_young
- set_pmd_at
- do_nothing
- serialize_against_pte_lookup
- pmdp_invalidate
- pmd_set_protbits
- pfn_pmd
- mk_pmd
- pmd_modify
- update_mmu_cache_pmd
- mmu_cleanup_all
- create_section_mapping
- remove_section_mapping
- mmu_partition_table_init
- flush_partition
- mmu_partition_table_set_entry
- get_pmd_from_cache
- __alloc_for_pmdcache
- pmd_fragment_alloc
- pmd_fragment_free
- pgtable_free
- pgtable_free_tlb
- __tlb_remove_table
- arch_report_meminfo
- ptep_modify_prot_start
- ptep_modify_prot_commit
- pmd_move_must_withdraw
- setup_disable_tlbie
- pgtable_debugfs_setup
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/debugfs.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e. a minor update of _PAGE_ACCESSED or _PAGE_DIRTY; the generic code
 * will do it for us.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * paths look at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

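/* Test and clear the "young" (accessed) bit of a huge PMD mapping. */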
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd. We should not be called to update an existing
 * pmd entry; that should go via pmd_hugepage_update().
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */
	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_large(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{

}

/*
 * Serialize against find_current_mm_pte(), which does a lock-less
 * lookup in the page tables with local interrupts disabled. For huge
 * pages it casts pmd_t to pte_t. Since the format of pte_t differs
 * from pmd_t, we want to prevent a transition from a pmd pointing to
 * a page table to a pmd pointing to a huge page (and back) while
 * interrupts are disabled. We clear the pmd so it can possibly be
 * replaced with a page table pointer in different code paths, so make
 * sure we wait for any parallel find_current_mm_pte() to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 *
	 * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
	 * a special case check in pmd_access_permitted().
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}

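/* Fold the protection bits of @pgprot into a raw pmd value. */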
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

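/* Build a huge-page pmd mapping @pfn with protection bits @pgprot. */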
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

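/* Like pfn_pmd(), but starting from a struct page. */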
pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

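/*
 * Apply @newprot to @pmd, preserving only the bits covered by
 * _HPAGE_CHG_MASK (the pfn plus dirty/accessed state).
 */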
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux
 * page tables. On radix we use it to prefetch the faulting address.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	if (radix_enabled())
		prefetch((void *)addr);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
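/*
 * Memory hotplug: dispatch linear-mapping creation/teardown to the
 * radix or hash implementation, whichever MMU mode is active.
 */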
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid);

	return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

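/*
 * Allocate the partition table, aligned to its size as the hardware
 * requires, and point the partition table control register (PTCR) at it.
 */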
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");

	partition_tb = memblock_alloc(patb_size, patb_size);
	if (!partition_tb)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, patb_size, patb_size);

	/*
	 * Update the partition table control register with the table's
	 * base address and its size encoded as log2(size) - 12.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	set_ptcr_when_no_uv(ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

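/*
 * Flush all translations for a partition, using the method appropriate
 * to the previous (radix or hash) use of the LPID.
 */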
static void flush_partition(unsigned int lpid, bool radix)
{
	if (radix) {
		radix__flush_all_lpid(lpid);
		radix__flush_all_lpid_guest(lpid);
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		/* do we need fixup here ?*/
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
}

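/*
 * Install both doublewords of the partition table entry (PATE) for
 * @lpid, then flush whatever translations the old entry may have
 * cached.
 */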
void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1, bool flush)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	/*
	 * When an ultravisor is enabled, the partition table is stored in
	 * secure memory and can only be accessed via an ultravisor call.
	 * However, we maintain a copy of the partition table in normal
	 * memory so that the nest MMU can still translate for normal VMs,
	 * so always update partition_tb here regardless of whether we are
	 * running under an ultravisor.
	 */
	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * If an ultravisor is enabled, do an ultravisor call to register
	 * the partition table entry (PATE), which also flushes the TLBs
	 * and partition table caches for the lpid. Otherwise flush here;
	 * the type of flush (hash or radix) depends on what the previous
	 * use of the partition ID was, not the new use.
	 */
	if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
		uv_register_pate(lpid, dw0, dw1);
		pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
			dw0, dw1);
	} else if (flush) {
		/*
		 * Boot does not need to flush here, because the MMU is off
		 * and each CPU does a tlbiel_all() before being switched
		 * on, which flushes everything.
		 */
		flush_partition(lpid, (old & PATB_HR));
	}
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

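/* Grab a pmd fragment left over from a previous page allocation, if any. */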
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the page NULL
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

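/*
 * Allocate a fresh page for pmd fragments, hand out the first fragment
 * and stash the remainder in mm->context.pmd_frag for later callers.
 */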
static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If nobody else allocated a fragment page in the meantime, take
	 * ownership: bump the refcount to cover all fragments and stash
	 * the second fragment in the per-mm cache.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

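/* Allocate a pmd page table, preferring the per-mm fragment cache. */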
pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

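/*
 * Drop a reference on the page backing a pmd fragment; free the page
 * once the last fragment is gone.
 */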
void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

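/* Free a page table of the given level, using the matching allocator. */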
static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
		/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
		/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
		/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

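/*
 * Queue a page table for freeing after a TLB flush. The level index is
 * stuffed into the low bits of the (suitably aligned) table pointer.
 */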
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

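/*
 * Called back by the generic mmu_gather code once it is safe to free
 * @_table: unpack the level index and pointer, then free the table.
 */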
void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps the memory with one size mmu_linear_psize,
	 * so don't bother to print these on hash.
	 */
	if (!radix_enabled())
		return;
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear _PAGE_PRESENT so that no hardware parallel update is
	 * possible. Also keep pte_present() true so that we don't take
	 * a wrong fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

	return __pte(pte_val);
}

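/*
 * Restore a pte previously "invalidated" by ptep_modify_prot_start(),
 * installing the new protection bits.
 */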
void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information and it is stored at PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we use the deposited table always, irrespective of anon or not.
 * With radix we use the deposited table only for anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = true;
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for TLB invalidation when possible? This is
 * set at boot and may be flipped at runtime via debugfs; when false,
 * TLB flushes fall back to tlbiel plus IPIs.
 */
bool tlbie_enabled __read_mostly = true;

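/* Handle the "disable_tlbie" kernel command line option. */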
static int __init setup_disable_tlbie(char *str)
{
	if (!radix_enabled()) {
		pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
		return 1;
	}

	tlbie_capable = false;
	tlbie_enabled = false;

	return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);

static int __init pgtable_debugfs_setup(void)
{
	if (!tlbie_capable)
		return 0;

	/*
	 * There is no locking vs tlb flushing when changing this value.
	 * The tlb flushers will see one value or another, and use either
	 * tlbie or tlbiel with IPIs. Individual page flushes could have
	 * conflicts but are unlikely to matter.
	 */
	debugfs_create_bool("tlbie_enabled", 0600,
			    powerpc_debugfs_root,
			    &tlbie_enabled);

	return 0;
}
arch_initcall(pgtable_debugfs_setup);