This source file includes the following definitions:
- arch_enter_lazy_mmu_mode
- arch_leave_lazy_mmu_mode
- hash__local_flush_tlb_mm
- hash__flush_tlb_mm
- hash__local_flush_all_mm
- hash__flush_all_mm
- hash__local_flush_tlb_page
- hash__flush_tlb_page
- hash__flush_tlb_range
- hash__flush_tlb_kernel_range
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H

/*
 * TLB flushing routines for hash MMUs (64-bit Book3S).
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR	192

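/*
 * Per-CPU batch of pending hash PTE invalidations.  Up to
 * PPC64_TLB_BATCH_NR (vpn, pte) pairs are collected while the batch is
 * active and then flushed together by __flush_tlb_pending().
 */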
struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vpn[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

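/*
 * On hash, entering lazy MMU mode arms the per-CPU batch above so that
 * subsequent hash PTE invalidations are queued instead of being flushed
 * one at a time; leaving the mode flushes whatever has accumulated.
 * Radix does not batch, so both hooks are no-ops there.
 */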
static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()	do {} while (0)

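/*
 * Illustrative sketch (not part of this header): generic mm code is
 * expected to bracket a run of PTE updates with these hooks, roughly
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		ptep_get_and_clear(mm, addr, ptep);
 *	arch_leave_lazy_mmu_mode();
 *
 * so that on hash the individual invalidations accumulate in the
 * per-CPU batch and are flushed in one go on leave.
 */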
extern void hash__tlbiel_all(unsigned int action);

extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
			    int ssize, unsigned long flags);
extern void flush_hash_range(unsigned long number, int local);
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
				pmd_t *pmdp, unsigned int psize, int ssize,
				unsigned long flags);
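/*
 * The generic per-mm/per-page flush hooks below are no-ops on hash: TLB
 * invalidation is driven from the hash page table update path (via
 * flush_hash_page()/flush_hash_range() above) rather than from these
 * callbacks.
 */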
static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__local_flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed here is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash.  The only alternative would be flushing the entire
	 * LPID, so just warn: nothing uses this on hash for now.
	 */
	WARN_ON_ONCE(1);
}

static inline void hash__flush_all_mm(struct mm_struct *mm)
{
	/*
	 * Same situation as hash__local_flush_all_mm() above: nothing
	 * sensible to flush short of the entire LPID, and no caller
	 * relies on this for hash, so just warn.
	 */
	WARN_ON_ONCE(1);
}

static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
}

static inline void hash__flush_tlb_kernel_range(unsigned long start,
						unsigned long end)
{
}

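/*
 * hash__tlb_flush() is the hash implementation of the mmu_gather
 * tlb_flush() hook; it pushes out any hash PTE invalidations still
 * sitting in the per-CPU batch.
 */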
struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);

/* Flush all hash page table entries for the given range of @mm */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr);
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */