This source file includes the following definitions:
- hash__vmemmap_create_mapping
- hash__vmemmap_remove_mapping
- hash__map_kernel_page
- hash__pmd_hugepage_update
- hash__pmdp_collapse_flush
- hash__pgtable_trans_huge_deposit
- hash__pgtable_trans_huge_withdraw
- hpte_do_hugepage_flush
- hash__pmdp_huge_get_and_clear
- hash__has_transparent_hugepage
- hash__change_memory_range
- hash__mark_rodata_ro
- hash__mark_initmem_nx
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>

#include <mm/mmu_decl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
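/*
 * Create a bolted hash page table mapping for one vmemmap section,
 * backing the virtual range [start, start + page_size) with the
 * physical memory at phys. If inserting the bolted entries fails
 * partway, tear down whatever was established so no partial mapping
 * is left behind.
 */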
int __meminit hash__vmemmap_create_mapping(unsigned long start,
					   unsigned long page_size,
					   unsigned long phys)
{
	int rc;

	if ((start + page_size) >= H_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	rc = htab_bolt_mapping(start, start + page_size, phys,
			       pgprot_val(PAGE_KERNEL),
			       mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
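/*
 * Tear down the bolted mapping for a vmemmap section. -ENOENT (no
 * entry present) is tolerated but warned about; any other failure
 * is fatal.
 */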
void hash__vmemmap_remove_mapping(unsigned long start,
				  unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
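/*
 * Map a single kernel page at ea to the physical address pa. Once the
 * slab allocator is up, walk and populate the Linux page tables; early
 * in boot, bolt an entry directly into the hash page table instead.
 */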
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
	} else {
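		/*
		 * The page tables (and the allocators needed to build
		 * them) are not available this early in boot, so bolt
		 * an entry straight into the hash page table.
		 */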
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
				      mmu_io_psize, mmu_kernel_ssize)) {
			pr_err("Failed to create bolted mapping for I/O memory at %016lx\n",
			       pa);
			return -ENOMEM;
		}
	}

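	/*
	 * Order the page table update against subsequent stores, so the
	 * mapping is visible before anything published through it.
	 */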
	smp_wmb();
	return 0;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

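/*
 * Atomically update a hugepage PMD: wait for H_PAGE_BUSY to clear,
 * then clear the bits in 'clr' and set the bits in 'set' in a single
 * ldarx/stdcx. sequence. If the old PMD had been inserted into the
 * hash table, flush the corresponding hash entries.
 */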
unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					pmd_t *pmdp, unsigned long clr,
					unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & H_PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}

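/*
 * Clear a PMD pointing to a PTE page during THP collapse, making sure
 * no other CPU can still be using that page table before returning.
 */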
pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);

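	/*
	 * Wait until no CPU is in the middle of a lockless page table
	 * walk that could still be dereferencing the old PTE page.
	 */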
	serialize_against_pte_lookup(vma->vm_mm);

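	/*
	 * Flush the hash table entries for the base pages covered by
	 * this PMD so nothing stale remains for the old mapping.
	 */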
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}

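/*
 * Deposit a preallocated PTE page for use when the huge PMD is split.
 * The hash variant keeps the pointer in the second half of the PMD
 * page, where the base page hash slot state is also tracked.
 */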
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

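	/* The deposit slot sits in the second half of the PMD page. */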
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;

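	/*
	 * Make the deposited pgtable visible to other CPUs before the
	 * hugepage PMD entry that relies on it becomes visible.
	 */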
	smp_wmb();
}

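/*
 * Withdraw the previously deposited PTE page from a huge PMD and hand
 * it back to the caller in a clean state.
 */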
pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;

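	/* Clear the deposit slot for the next deposit. */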
	*pgtable_slot = NULL;

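	/*
	 * The fragment may still hold valid-bit and hash slot details
	 * from its time under the huge PMD; zero it before reuse.
	 */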
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}

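/*
 * A hugepage PMD has been changed; flush the hash page table entries
 * of the base pages that backed it.
 */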
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;

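	/* Work out the base page size, VSID and segment size. */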
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & H_PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	if (mm_is_thread_local(mm))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

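/*
 * Atomically clear a huge PMD, returning its old contents, and scrub
 * the deposited page table that tracked its hash slot state.
 */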
pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
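	/*
	 * The PMD is now none and the PMD lock is held, so the hash
	 * index details kept in the deposited pgtable can safely be
	 * cleared.
	 */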
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
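	/* Zero out the old valid-bit and hash index details. */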
	memset(pgtable, 0, PTE_FRAG_SIZE);
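	/*
	 * Wait for any lockless page table walk referencing the old
	 * entries to finish before returning the cleared PMD.
	 */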
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

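/*
 * Report whether this hash MMU configuration supports transparent
 * hugepages.
 */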
int hash__has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;

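	/* THP is supported only when the PMD maps exactly one 16MB page. */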
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;

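	/*
	 * A 16MB hugepage must also be usable in a segment with a 64K
	 * base page size, which requires a valid hardware page size
	 * encoding (penc) for that combination.
	 */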
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;

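	/* Likewise for segments with a 4K base page size. */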
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX
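/*
 * Update the protection bits of every bolted hash entry covering the
 * range [start, end), rounded out to the linear mapping page size.
 * Returns false if the rounded range is empty.
 */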
static bool hash__change_memory_range(unsigned long start, unsigned long end,
				      unsigned long newpp)
{
	unsigned long idx;
	unsigned int step, shift;

	shift = mmu_psize_defs[mmu_linear_psize].shift;
	step = 1 << shift;

	start = ALIGN_DOWN(start, step);
	end = ALIGN(end, step);

	if (start >= end)
		return false;

	pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
		 start, end, newpp, step);

	for (idx = start; idx < end; idx += step)
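		/* Nothing useful can be done on failure here, so the return value is ignored. */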
		mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
						 mmu_kernel_ssize);

	return true;
}

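/*
 * Mark the kernel text and rodata, from _stext to __init_begin, as
 * read-only via the bolted hash mappings.
 */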
void hash__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
}

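/*
 * Return the init sections, from __init_begin to __init_end, to the
 * normal PAGE_KERNEL protection (in particular, no longer executable).
 */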
void hash__mark_initmem_nx(void)
{
	unsigned long start, end, pp;

	start = (unsigned long)__init_begin;
	end = (unsigned long)__init_end;

	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));

	WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif /* CONFIG_STRICT_KERNEL_RWX */