This source file includes the following definitions:
- tlb_next_batch
- tlb_batch_pages_flush
- tlb_batch_list_free
- __tlb_remove_page_size
- tlb_table_invalidate
- tlb_remove_table_smp_sync
- tlb_remove_table_one
- tlb_remove_table_rcu
- tlb_table_flush
- tlb_remove_table
- tlb_flush_mmu_free
- tlb_flush_mmu
- tlb_gather_mmu
- tlb_finish_mmu

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        batch = tlb->active;
        if (batch->next) {
                tlb->active = batch->next;
                return true;
        }

        if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
                return false;

        batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
        if (!batch)
                return false;

        tlb->batch_count++;
        batch->next = NULL;
        batch->nr = 0;
        batch->max = MAX_GATHER_BATCH;

        tlb->active->next = batch;
        tlb->active = batch;

        return true;
}
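tlb_next_batch() grows the gather one page at a time: each batch node occupies exactly one page and chains onto the previous one. For reference, a sketch of the batch layout this code relies on, modeled on the definitions in include/asm-generic/tlb.h (treat the exact field layout here as an assumption of the sketch, not an authoritative copy):

/* Sketch of the batch node allocated above; modeled on the
 * definitions in include/asm-generic/tlb.h (assumed layout). */
struct mmu_gather_batch {
        struct mmu_gather_batch *next;   /* chain of one-page batch nodes */
        unsigned int            nr;      /* pages currently queued */
        unsigned int            max;     /* capacity of pages[] */
        struct page             *pages[];
};

/* A batch is a single page, so its capacity is whatever space remains
 * after the header fields: */
#define MAX_GATHER_BATCH \
        ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))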

static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
        }
        tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch, *next;

        for (batch = tlb->local.next; batch; batch = next) {
                next = batch->next;
                free_pages((unsigned long)batch, 0);
        }
        tlb->local.next = NULL;
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
        struct mmu_gather_batch *batch;

        VM_BUG_ON(!tlb->end);

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
        VM_WARN_ON(tlb->page_size != page_size);
#endif

        batch = tlb->active;
        /*
         * Add the page and check if we are full. If so
         * force a flush.
         */
        batch->pages[batch->nr++] = page;
        if (batch->nr == batch->max) {
                if (!tlb_next_batch(tlb))
                        return true;
                batch = tlb->active;
        }
        VM_BUG_ON_PAGE(batch->nr > batch->max, page);

        return false;
}
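The return value is the caller's flush signal: true means the current batch is full and no further batch could be allocated, so the caller must flush before queueing more pages. A minimal sketch of that contract (loosely modeled on zap_pte_range() in mm/memory.c, which in reality defers the flush until after the page-table lock is dropped; the helper name here is hypothetical):

/* Hypothetical caller illustrating the return-value contract of
 * __tlb_remove_page_size(); real callers defer tlb_flush_mmu() until
 * after dropping the page-table lock. */
static void example_queue_page(struct mmu_gather *tlb, struct page *page)
{
        if (__tlb_remove_page_size(tlb, page, PAGE_SIZE)) {
                /* Batches are full and no new one could be allocated:
                 * flush the TLB and free the queued pages now. */
                tlb_flush_mmu(tlb);
        }
}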

#endif /* CONFIG_HAVE_MMU_GATHER_NO_GATHER */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

/*
 * See the comment near struct mmu_table_batch.
 */

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
        if (tlb_needs_table_invalidate()) {
                /*
                 * Invalidate page-table caches used by hardware walkers. Then
                 * we still need to RCU-sched wait while freeing the pages
                 * because software walkers can still be in-flight.
                 */
                tlb_flush_mmu_tlbonly(tlb);
        }
}

static void tlb_remove_table_smp_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
        /*
         * This isn't an RCU grace period and hence the page tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely
         * on IRQ disabling.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
        __tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
        struct mmu_table_batch *batch;
        int i;

        batch = container_of(head, struct mmu_table_batch, rcu);

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);

        free_page((unsigned long)batch);
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                tlb_table_invalidate(tlb);
                call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
        }
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        tlb_table_invalidate(tlb);
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }

        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_table_flush(tlb);
}

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
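tlb_remove_table() is the entry point an architecture calls when it retires a page-table page; the architecture also supplies __tlb_remove_table() to perform the final free once it is safe against concurrent lockless walkers (after the RCU grace period, or after the IPI in tlb_remove_table_one()). A simplified, illustrative sketch of that glue follows; the real versions live in each architecture's asm/tlb.h and page-table allocation code, and the names and bodies below are assumptions, not taken verbatim from any one architecture:

/* Illustrative arch-side glue. The final free runs only after the
 * semi-RCU scheme above guarantees no software walker can still be
 * traversing the table. */
static void __tlb_remove_table(void *table)
{
        free_page_and_swap_cache((struct page *)table);
}

/* Hypothetical arch hook retiring a PTE page during unmap: rather
 * than freeing immediately, it queues the page for deferred freeing. */
static void example_pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        tlb_remove_table(tlb, pte);
}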

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
#endif
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
        tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm. The @start and @end are set to 0 and -1
 * respectively when @mm is without users and we're going to destroy
 * the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                        unsigned long start, unsigned long end)
{
        tlb->mm = mm;

        /* Is it from 0 to ~0? */
        tlb->fullmm = !(start | (end+1));

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
        tlb->need_flush_all = 0;
        tlb->local.next = NULL;
        tlb->local.nr = 0;
        tlb->local.max = ARRAY_SIZE(tlb->__pages);
        tlb->active = &tlb->local;
        tlb->batch_count = 0;
#endif

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb->batch = NULL;
#endif
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
        tlb->page_size = 0;
#endif

        __tlb_reset_range(tlb);
        inc_tlb_flush_pending(tlb->mm);
}

/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 * @start: start of the region that was removed from the page-table
 * @end: end of the region that was removed from the page-table
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb,
                unsigned long start, unsigned long end)
{
        /*
         * If parallel threads are doing PTE changes on the same range under a
         * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB
         * flush by batching, one thread may end up seeing inconsistent PTEs
         * and stale TLB entries, so flush the TLB forcefully if we detect
         * parallel PTE batching threads.
         *
         * However, some syscalls, e.g. munmap(), may free page tables; this
         * needs to force flush everything in the given range. Otherwise we
         * may end up with stale TLB entries on architectures, e.g. aarch64,
         * that can flush only a specific level of the TLB.
         */
        if (mm_tlb_flush_nested(tlb->mm)) {
                /*
                 * aarch64 yields better performance with fullmm by avoiding
                 * multiple CPUs spamming TLBI messages at the same time.
                 *
                 * On x86 non-fullmm doesn't yield a significant difference
                 * against fullmm.
                 */
                tlb->fullmm = 1;
                __tlb_reset_range(tlb);
                tlb->freed_tables = 1;
        }

        tlb_flush_mmu(tlb);

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
        tlb_batch_list_free(tlb);
#endif
        dec_tlb_flush_pending(tlb->mm);
}
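Taken together, tlb_gather_mmu() and tlb_finish_mmu() bracket a whole unmap operation. A hedged end-to-end sketch of the typical lifecycle, loosely modeled on unmap_region() in mm/mmap.c (arguments simplified; in the real caller, free_pgtables() takes floor/ceiling bounds derived from the neighbouring VMAs, and the function name here is hypothetical):

/* Hedged lifecycle sketch, loosely modeled on mm/mmap.c:unmap_region();
 * simplified and not a drop-in replacement for the real caller. */
static void example_unmap_region(struct mm_struct *mm,
                                 struct vm_area_struct *vma,
                                 unsigned long start, unsigned long end)
{
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, start, end);   /* begin the shootdown */
        unmap_vmas(&tlb, vma, start, end);      /* queue pages, widen range */
        free_pgtables(&tlb, vma, start, end);   /* queue page-table pages */
        tlb_finish_mmu(&tlb, start, end);       /* flush TLB, free it all */
}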