This source file includes the following definitions:
- mk_esid_data
- __mk_vsid_data
- mk_vsid_data
- assert_slb_presence
- slb_shadow_update
- slb_shadow_clear
- create_shadowed_slbe
- __slb_restore_bolted_realmode
- slb_restore_bolted_realmode
- slb_flush_all_realmode
- slb_flush_and_restore_bolted
- slb_save_contents
- slb_dump_contents
- slb_vmalloc_update
- preload_hit
- preload_add
- preload_age
- slb_setup_new_exec
- preload_new_slb_context
- switch_slb
- slb_set_size
- slb_initialize
- slb_cache_update
- alloc_slb_index
- slb_insert_entry
- slb_allocate_kernel
- slb_allocate_user
- do_slb_fault
- do_bad_slb_fault
#include <asm/asm-prototypes.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>

enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map (0xc000000000000000) */
	KSTACK_INDEX	= 1, /* Kernel stack map */
};

static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);

#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

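/*
 * An SLB entry pairs an ESID doubleword (effective segment ID, valid bit,
 * entry index) with a VSID doubleword (virtual segment ID, segment size
 * and protection/LLP flags). The helpers below assemble the two values in
 * the layout the slbmte instruction expects.
 */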
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
					   unsigned long flags)
{
	return (vsid << slb_vsid_shift(ssize)) | flags |
		((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}

static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
	unsigned long tmp;

	WARN_ON_ONCE(mfmsr() & MSR_EE);

	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		return;

	/*
	 * slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware
	 * ignores all other bits from 0-27, so just clear them all.
	 */
	ea &= ~((1UL << 28) - 1);
	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");

	WARN_ON(present == (tmp == 0));
#endif
}

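/*
 * The SLB shadow buffer mirrors the bolted SLB entries for the hypervisor
 * (e.g. so it can reinstall them across a partition migration), so it must
 * be kept consistent with the entries actually written with slbmte.
 */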
static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     enum slb_index index)
{
	struct slb_shadow *p = get_slb_shadow();

	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	WRITE_ONCE(p->save_area[index].esid, 0);
	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}

static inline void slb_shadow_clear(enum slb_index index)
{
	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					enum slb_index index)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, index);

	assert_slb_presence(false, ea);
	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, index))
		     : "memory" );
}

/*
 * Insert the bolted entries into the SLB (which may not be empty, so
 * don't clear slb_cache_ptr).
 */
void __slb_restore_bolted_realmode(void)
{
	struct slb_shadow *p = get_slb_shadow();
	enum slb_index index;

	/* No isync needed because realmode. */
	for (index = 0; index < SLB_NUM_BOLTED; index++) {
		asm volatile("slbmte  %0,%1" :
		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
		       "r" (be64_to_cpu(p->save_area[index].esid)));
	}

	assert_slb_presence(true, local_paca->kstack);
}

/*
 * Insert the bolted entries into an empty SLB.
 */
void slb_restore_bolted_realmode(void)
{
	__slb_restore_bolted_realmode();
	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}

/*
 * This flushes all SLB entries including 0, so it must be realmode.
 */
void slb_flush_all_realmode(void)
{
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}

/*
 * This flushes non-bolted entries, it can be run in virtual mode. Must
 * be called with interrupts disabled.
 */
void slb_flush_and_restore_bolted(void)
{
	struct slb_shadow *p = get_slb_shadow();

	BUILD_BUG_ON(SLB_NUM_BOLTED != 2);

	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	asm volatile("isync\n"
		     "slbia\n"
		     "slbmte  %0, %1\n"
		     "isync\n"
		     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
			"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
		     : "memory");
	assert_slb_presence(true, get_paca()->kstack);

	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}

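/*
 * Read every SLB slot on this CPU into slb_ptr (one slbmfee/slbmfev pair
 * per slot, mmu_slb_size slots in total), and snapshot slb_cache_ptr so a
 * later slb_dump_contents() can report the cache state at save time.
 */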
void slb_save_contents(struct slb_entry *slb_ptr)
{
	int i;
	unsigned long e, v;

	/* Save slb_cache_ptr value into paca data structure */
	get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;

	if (!slb_ptr)
		return;

	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee  %0,%1" : "=r" (e) : "r" (i));
		asm volatile("slbmfev  %0,%1" : "=r" (v) : "r" (i));
		slb_ptr->esid = e;
		slb_ptr->vsid = v;
		slb_ptr++;
	}
}

void slb_dump_contents(struct slb_entry *slb_ptr)
{
	int i, n;
	unsigned long e, v;
	unsigned long llp;

	if (!slb_ptr)
		return;

	pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
	pr_err("Last SLB entry inserted at slot %d\n", get_paca()->stab_rr);

	for (i = 0; i < mmu_slb_size; i++) {
		e = slb_ptr->esid;
		v = slb_ptr->vsid;
		slb_ptr++;

		if (!e && !v)
			continue;

		pr_err("%02d %016lx %016lx\n", i, e, v);

		if (!(e & SLB_ESID_V)) {
			pr_err("\n");
			continue;
		}
		llp = v & SLB_VSID_LLP;
		if (v & SLB_VSID_B_1T) {
			pr_err("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID_1T(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
		} else {
			pr_err(" 256M ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
		}
	}
	pr_err("----------------------------------\n");

	/* Dump slb cache entries as well. */
	pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
	pr_err("Valid SLB cache entries:\n");
	n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
	for (i = 0; i < n; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
	pr_err("Rest of SLB cache entries:\n");
	for (i = n; i < SLB_CACHE_ENTRIES; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
}

void slb_vmalloc_update(void)
{
	/*
	 * vmalloc is not bolted, so just have to flush non-bolted.
	 */
	slb_flush_and_restore_bolted();
}

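/*
 * Each thread keeps a small FIFO (thread_info::slb_preload_esid, at most
 * SLB_PRELOAD_NR entries) of recently used user ESIDs. switch_slb()
 * replays these via slb_allocate_user() so the common user segments are
 * already present when the task next runs.
 */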
static bool preload_hit(struct thread_info *ti, unsigned long esid)
{
	unsigned char i;

	for (i = 0; i < ti->slb_preload_nr; i++) {
		unsigned char idx;

		idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
		if (esid == ti->slb_preload_esid[idx])
			return true;
	}
	return false;
}

static bool preload_add(struct thread_info *ti, unsigned long ea)
{
	unsigned char idx;
	unsigned long esid;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
		/* EAs are stored >> 28 so 256MB segments don't need clearing */
		if (ea & ESID_MASK_1T)
			ea &= ESID_MASK_1T;
	}

	esid = ea >> SID_SHIFT;

	if (preload_hit(ti, esid))
		return false;

	idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR;
	ti->slb_preload_esid[idx] = esid;
	if (ti->slb_preload_nr == SLB_PRELOAD_NR)
		ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
	else
		ti->slb_preload_nr++;

	return true;
}

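/* Drop the oldest entry from the preload FIFO. */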
static void preload_age(struct thread_info *ti)
{
	if (!ti->slb_preload_nr)
		return;
	ti->slb_preload_nr--;
	ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
}

void slb_setup_new_exec(void)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long exec = 0x10000000;

	WARN_ON(irqs_disabled());

	/*
	 * preload cache can only be used to determine whether a SLB
	 * entry exists if it does not start to overflow.
	 */
	if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
		return;

	hard_irq_disable();

	/*
	 * We have no good place to clear the slb preload cache on exec,
	 * flush_thread is about the earliest arch hook but that happens
	 * after we switch to the mm and have already preloaded the SLBEs.
	 *
	 * For the most part that's probably okay to use entries from the
	 * previous exec, they will age out if unused. It may turn out to
	 * be an advantage to clear the cache before switching to it,
	 * however.
	 */

	/*
	 * preload some userspace segments into the SLB.
	 * Almost all 32 and 64bit PowerPC executables are linked at
	 * 0x10000000 so it makes sense to preload this segment.
	 */
	if (!is_kernel_addr(exec)) {
		if (preload_add(ti, exec))
			slb_allocate_user(mm, exec);
	}

	/* Libraries and mmaps. */
	if (!is_kernel_addr(mm->mmap_base)) {
		if (preload_add(ti, mm->mmap_base))
			slb_allocate_user(mm, mm->mmap_base);
	}

	/* see switch_slb */
	asm volatile("isync" : : : "memory");

	local_irq_enable();
}

void preload_new_slb_context(unsigned long start, unsigned long sp)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long heap = mm->start_brk;

	WARN_ON(irqs_disabled());

	/* see above */
	if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
		return;

	hard_irq_disable();

	/* Userspace entry address. */
	if (!is_kernel_addr(start)) {
		if (preload_add(ti, start))
			slb_allocate_user(mm, start);
	}

	/* Top of stack, grows down. */
	if (!is_kernel_addr(sp)) {
		if (preload_add(ti, sp))
			slb_allocate_user(mm, sp);
	}

	/* Bottom of heap, grows up. */
	if (heap && !is_kernel_addr(heap)) {
		if (preload_add(ti, heap))
			slb_allocate_user(mm, heap);
	}

	/* see switch_slb */
	asm volatile("isync" : : : "memory");

	local_irq_enable();
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned char i;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which would try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	asm volatile("isync" : : : "memory");
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/*
		 * SLBIA IH=3 invalidates all Class=1 SLBEs and their
		 * associated lookaside structures, which matches what
		 * switch_slb wants. So ARCH_300 does not use the slb
		 * cache.
		 */
		asm volatile(PPC_SLBIA(3));
	} else {
		unsigned long offset = get_paca()->slb_cache_ptr;

		if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
		    offset <= SLB_CACHE_ENTRIES) {
			unsigned long slbie_data = 0;

			for (i = 0; i < offset; i++) {
				unsigned long ea;

				ea = (unsigned long)
					get_paca()->slb_cache[i] << SID_SHIFT;
				/*
				 * Could assert_slb_presence(true) here, but
				 * hypervisor or machine check could have come
				 * in and removed the entry at this point.
				 */
				slbie_data = ea;
				slbie_data |= user_segment_size(slbie_data)
						<< SLBIE_SSIZE_SHIFT;
				slbie_data |= SLBIE_C; /* user slbs have C=1 */
				asm volatile("slbie %0" : : "r" (slbie_data));
			}

			/* Workaround POWER5 < DD2.1 issue */
			if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
				asm volatile("slbie %0" : : "r" (slbie_data));

		} else {
			struct slb_shadow *p = get_slb_shadow();
			unsigned long ksp_esid_data =
				be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
			unsigned long ksp_vsid_data =
				be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);

			asm volatile(PPC_SLBIA(1) "\n"
				     "slbmte	%0,%1\n"
				     "isync"
				     :: "r"(ksp_vsid_data),
					"r"(ksp_esid_data));

			get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
		}

		get_paca()->slb_cache_ptr = 0;
	}
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

	copy_mm_to_paca(mm);

	/*
	 * We gradually age out SLB preload entries after a number of context
	 * switches to reduce reload overhead of unused entries (like we used
	 * to do on hash). When the 8-bit load_slb counter wraps, drop the
	 * oldest preload entry and add the task's current NIP in its place.
	 */
	tsk->thread.load_slb++;
	if (!tsk->thread.load_slb) {
		unsigned long pc = KSTK_EIP(tsk);

		preload_age(ti);
		preload_add(ti, pc);
	}

	for (i = 0; i < ti->slb_preload_nr; i++) {
		unsigned char idx;
		unsigned long ea;

		idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
		ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT;

		slb_allocate_user(mm, ea);
	}

	/*
	 * Synchronize slbmte preloads with possible subsequent user memory
	 * address accesses by the kernel (user mode won't happen until
	 * rfid, which is safe).
	 */
	asm volatile("isync" : : : "memory");
}

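/*
 * Set the number of SLB entries this CPU may use, e.g. as discovered
 * from the device tree by platform setup code.
 */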
void slb_set_size(u16 size)
{
	mmu_slb_size = size;
}

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io      LLP = %04lx\n", io_llp);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED - 1;
	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

	lflags = SLB_VSID_KERNEL | linear_llp;

	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);

	/*
	 * For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(KSTACK_INDEX);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, KSTACK_INDEX);

	asm volatile("isync":::"memory");
}

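/*
 * Record a user SLB allocation in the PACA slb_cache so switch_slb() can
 * invalidate it with targeted slbie instructions rather than a full slbia.
 */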
static void slb_cache_update(unsigned long esid_data)
{
	int slb_cache_index;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return; /* ISAv3.0B and later does not use slb_cache */

	/*
	 * Now update slb cache entries
	 */
	slb_cache_index = local_paca->slb_cache_ptr;
	if (slb_cache_index < SLB_CACHE_ENTRIES) {
		/*
		 * We have space in slb cache for optimized switch_slb().
		 * Top 36 bits from esid_data as per ISA
		 */
		local_paca->slb_cache[slb_cache_index++] = esid_data >> 28;
		local_paca->slb_cache_ptr++;
	} else {
		/*
		 * Our cache is full and the current cache content strictly
		 * doesn't indicate the active SLB contents. Bump the ptr
		 * so that switch_slb() will ignore the cache.
		 */
		local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}

static enum slb_index alloc_slb_index(bool kernel)
{
	enum slb_index index;

	/*
	 * The allocation bitmaps can become out of synch with the SLB
	 * when the _switch code does slbie when bolting a new stack
	 * segment and it must not be anywhere else in the SLB. This leaves
	 * a kernel allocated entry that is unused in the SLB. With very
	 * large systems or small segment sizes, the bitmaps could slowly
	 * fill with these entries. They will eventually be cleared out
	 * by the round robin allocator in that case, so it's probably not
	 * worth accounting for.
	 */

	/*
	 * SLBs beyond 32 entries are allocated with stab_rr only
	 * POWER7/8/9 have 32 SLB entries, this could be expanded if a
	 * future CPU has more.
	 */
	if (local_paca->slb_used_bitmap != U32_MAX) {
		index = ffz(local_paca->slb_used_bitmap);
		local_paca->slb_used_bitmap |= 1U << index;
		if (kernel)
			local_paca->slb_kern_bitmap |= 1U << index;
	} else {
		/* round-robin replacement of slb starting at SLB_NUM_BOLTED. */
		index = local_paca->stab_rr;
		if (index < (mmu_slb_size - 1))
			index++;
		else
			index = SLB_NUM_BOLTED;
		local_paca->stab_rr = index;
		if (index < 32) {
			if (kernel)
				local_paca->slb_kern_bitmap |= 1U << index;
			else
				local_paca->slb_kern_bitmap &= ~(1U << index);
		}
	}
	BUG_ON(index < SLB_NUM_BOLTED);

	return index;
}

static long slb_insert_entry(unsigned long ea, unsigned long context,
			     unsigned long flags, int ssize, bool kernel)
{
	unsigned long vsid;
	unsigned long vsid_data, esid_data;
	enum slb_index index;

	vsid = get_vsid(context, ea, ssize);
	if (!vsid)
		return -EFAULT;

	/*
	 * There must not be a kernel SLB fault in alloc_slb_index or before
	 * slbmte here or the allocation bitmaps could get out of whack with
	 * the SLB.
	 *
	 * User SLB faults or preloads take this path which might get inlined
	 * into the caller, so add compiler barriers here to ensure unsafe
	 * memory accesses do not come between.
	 */
	barrier();

	index = alloc_slb_index(kernel);

	vsid_data = __mk_vsid_data(vsid, ssize, flags);
	esid_data = mk_esid_data(ea, ssize, index);

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 * User preloads should add isync afterwards in case the kernel
	 * accesses user memory before it returns to userspace with rfid.
	 */
	assert_slb_presence(false, ea);
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));

	barrier();

	if (!kernel)
		slb_cache_update(esid_data);

	return 0;
}

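/*
 * Build and insert an SLB entry for a kernel address. The region id picks
 * the page-size (LLP) flags; addresses past a region's limit fault.
 */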
static long slb_allocate_kernel(unsigned long ea, unsigned long id)
{
	unsigned long context;
	unsigned long flags;
	int ssize;

	if (id == LINEAR_MAP_REGION_ID) {

		/* We only support upto MAX_PHYSMEM_BITS */
		if ((ea & EA_MASK) > (1UL << MAX_PHYSMEM_BITS))
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	} else if (id == VMEMMAP_REGION_ID) {

		if (ea >= H_VMEMMAP_END)
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	} else if (id == VMALLOC_REGION_ID) {

		if (ea >= H_VMALLOC_END)
			return -EFAULT;

		flags = local_paca->vmalloc_sllp;

	} else if (id == IO_REGION_ID) {

		if (ea >= H_KERN_IO_END)
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;

	} else {
		return -EFAULT;
	}

	ssize = MMU_SEGSIZE_1T;
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		ssize = MMU_SEGSIZE_256M;

	context = get_kernel_context(ea);

	return slb_insert_entry(ea, context, flags, ssize, true);
}

static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
{
	unsigned long context;
	unsigned long flags;
	int bpsize;
	int ssize;

	/*
	 * consider this as bad access if we take a SLB miss
	 * on an address above addr limit.
	 */
	if (ea >= mm_ctx_slb_addr_limit(&mm->context))
		return -EFAULT;

	context = get_user_context(&mm->context, ea);
	if (!context)
		return -EFAULT;

	if (unlikely(ea >= H_PGTABLE_RANGE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	ssize = user_segment_size(ea);

	bpsize = get_slice_psize(mm, ea);
	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;

	return slb_insert_entry(ea, context, flags, ssize, false);
}

long do_slb_fault(struct pt_regs *regs, unsigned long ea)
{
	unsigned long id = get_region_id(ea);

	/* IRQs are not reconciled here, so can't check irqs_disabled */
	VM_WARN_ON(mfmsr() & MSR_EE);

	if (unlikely(!(regs->msr & MSR_RI)))
		return -EINVAL;

	/*
	 * SLB kernel faults must be very careful not to touch anything
	 * that is not bolted. E.g., PACA and global variables are okay,
	 * mm->context stuff is not.
	 *
	 * SLB user faults can access all of kernel memory, but must be
	 * careful not to touch things like IRQ state because it is not
	 * "reconciled" here. The difficulty is that we must use
	 * fast_exception_return to return from kernel SLB faults without
	 * looking at possible non-bolted memory. We could test user vs
	 * kernel faults in the interrupt handler asm and do a full fault,
	 * reconcile, ret_from_except for user faults which would make them
	 * first class kernel code. But for performance reasons that's not
	 * done here.
	 */
	if (id >= LINEAR_MAP_REGION_ID) {
		long err;
#ifdef CONFIG_DEBUG_VM
		/* Catch recursive kernel SLB faults. */
		BUG_ON(local_paca->in_kernel_slb_handler);
		local_paca->in_kernel_slb_handler = 1;
#endif
		err = slb_allocate_kernel(ea, id);
#ifdef CONFIG_DEBUG_VM
		local_paca->in_kernel_slb_handler = 0;
#endif
		return err;
	} else {
		struct mm_struct *mm = current->mm;
		long err;

		if (unlikely(!mm))
			return -EFAULT;

		err = slb_allocate_user(mm, ea);
		if (!err)
			preload_add(current_thread_info(), ea);

		return err;
	}
}

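/*
 * Turn an error from do_slb_fault() into a signal or exception: -EFAULT
 * is a bad address (SIGSEGV, or a kernel page fault from kernel mode),
 * -EINVAL means the interrupt arrived with MSR[RI] clear and cannot be
 * recovered.
 */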
void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err)
{
	if (err == -EFAULT) {
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_BNDERR, ea);
		else
			bad_page_fault(regs, ea, SIGSEGV);
	} else if (err == -EINVAL) {
		unrecoverable_exception(regs);
	} else {
		BUG();
	}
}