This source file includes following definitions.
- __tlb_adjust_range
- __tlb_reset_range
- tlb_flush
- tlb_update_vma_flags
- tlb_end_vma
- tlb_flush
- tlb_update_vma_flags
- tlb_update_vma_flags
- tlb_flush_mmu_tlbonly
- tlb_remove_page_size
- __tlb_remove_page
- tlb_remove_page
- tlb_change_page_size
- tlb_get_unmap_shift
- tlb_get_unmap_size
- tlb_start_vma
- tlb_end_vma
1
2
3
4
5
6
7
8
9
10
11 #ifndef _ASM_GENERIC__TLB_H
12 #define _ASM_GENERIC__TLB_H
13
14 #include <linux/mmu_notifier.h>
15 #include <linux/swap.h>
16 #include <asm/pgalloc.h>
17 #include <asm/tlbflush.h>
18 #include <asm/cacheflush.h>
19
20
21
22
23
24
25 #ifndef nmi_uaccess_okay
26 # define nmi_uaccess_okay() true
27 #endif
28
29 #ifdef CONFIG_MMU
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174 struct mmu_table_batch {
175 struct rcu_head rcu;
176 unsigned int nr;
177 void *tables[0];
178 };
179
180 #define MAX_TABLE_BATCH \
181 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
182
183 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
184
185
186
187
188
189 #ifndef tlb_needs_table_invalidate
190 #define tlb_needs_table_invalidate() (true)
191 #endif
192
193 #else
194
195 #ifdef tlb_needs_table_invalidate
196 #error tlb_needs_table_invalidate() requires HAVE_RCU_TABLE_FREE
197 #endif
198
199 #endif
200
201
202 #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
203
204
205
206
207 #define MMU_GATHER_BUNDLE 8
208
209 struct mmu_gather_batch {
210 struct mmu_gather_batch *next;
211 unsigned int nr;
212 unsigned int max;
213 struct page *pages[0];
214 };
215
216 #define MAX_GATHER_BATCH \
217 ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
218
219
220
221
222
223
224
225 #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
226
227 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
228 int page_size);
229 #endif
230
231
232
233
234
/*
 * struct mmu_gather - accumulates the address range and pages unmapped by a
 * munmap/exit/truncate so the TLB flush and page freeing can be batched.
 */
struct mmu_gather {
	struct mm_struct *mm;	/* mm whose mappings are being torn down */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch *batch;	/* page-table pages pending RCU free */
#endif

	/* Virtual address range accumulated by __tlb_adjust_range(). */
	unsigned long start;
	unsigned long end;

	/*
	 * Entire address space is being torn down (exit/execve); range
	 * tracking is moot and a full flush_tlb_mm() will be issued.
	 */
	unsigned int fullmm : 1;

	/*
	 * Architecture requested a full-mm flush regardless of the
	 * gathered range.
	 */
	unsigned int need_flush_all : 1;

	/*
	 * Page-table pages themselves were freed (set by the *_free_tlb()
	 * macros below); some architectures need a deeper flush for this.
	 */
	unsigned int freed_tables : 1;

	/*
	 * Granularity of the entries cleared so far, consulted by
	 * tlb_get_unmap_shift() to pick the finest flush granule.
	 */
	unsigned int cleared_ptes : 1;
	unsigned int cleared_pmds : 1;
	unsigned int cleared_puds : 1;
	unsigned int cleared_p4ds : 1;

	/*
	 * vm_flags hints (VM_EXEC / VM_HUGETLB) recorded by
	 * tlb_update_vma_flags() for the default tlb_flush()'s fake vma.
	 */
	unsigned int vma_exec : 1;
	unsigned int vma_huge : 1;

	unsigned int batch_count;	/* batches allocated, capped by MAX_GATHER_BATCH_COUNT */

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;	/* batch currently being filled */
	struct mmu_gather_batch local;		/* embedded first batch ... */
	struct page *__pages[MMU_GATHER_BUNDLE];	/* ... and its storage */

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;	/* page size of the entries gathered so far */
#endif
#endif
};
287
/* Gather lifecycle entry points, implemented out of line. */
void arch_tlb_gather_mmu(struct mmu_gather *tlb,
	struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
	unsigned long start, unsigned long end, bool force);
293
294 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
295 unsigned long address,
296 unsigned int range_size)
297 {
298 tlb->start = min(tlb->start, address);
299 tlb->end = max(tlb->end, address + range_size);
300 }
301
/*
 * Reset the gathered range and the cleared/freed state, after a flush or at
 * gather initialisation.
 */
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		/* Whole-mm teardown: the range is irrelevant, mark it ~0. */
		tlb->start = tlb->end = ~0;
	} else {
		/* "Empty" sentinel: start > end until __tlb_adjust_range(). */
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;

	/*
	 * NOTE(review): vma_exec/vma_huge are deliberately left alone here —
	 * presumably so an intermediate flush within one VMA keeps the right
	 * flush_tlb_range() hints; confirm against tlb_flush() below.
	 */
}
321
322 #ifdef CONFIG_MMU_GATHER_NO_RANGE
323
324 #if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
325 #error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
326 #endif
327
328
329
330
331
332
333
334
335
/*
 * CONFIG_MMU_GATHER_NO_RANGE: no per-range state is tracked, so any
 * non-empty gather (tlb->end != 0) must flush the entire mm.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}
341
/* No range state is kept, so there are no vma hints to record. */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
/* Suppress the default per-VMA flush; the final full-mm flush covers it. */
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
347
348 #else
349
350 #ifndef tlb_flush
351
352 #if defined(tlb_start_vma) || defined(tlb_end_vma)
353 #error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
354 #endif
355
356
357
358
359
360
/*
 * Default tlb_flush(): flush the whole mm for full-mm teardowns (or when the
 * architecture set need_flush_all), otherwise flush just the gathered range.
 * flush_tlb_range() takes a vma, so build a minimal on-stack one carrying
 * only the vm_flags bits recorded by tlb_update_vma_flags().
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
375
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * Record the only two vm_flags bits that the default tlb_flush()
	 * forwards to flush_tlb_range() via its on-stack fake vma:
	 * VM_HUGETLB (some implementations flush only huge-page TLBs for
	 * hugetlb vmas) and VM_EXEC (I-cache/I-TLB maintenance).
	 */
	tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}
393
394 #else
395
/* The architecture supplies its own tlb_flush(); no vma hints are needed. */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
398
399 #endif
400
401 #endif
402
/*
 * Flush the TLB for the gathered range, notify secondary MMUs, and reset the
 * range state — but do NOT free the gathered pages (hence "tlbonly";
 * tlb_flush_mmu() does both).
 */
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/* Nothing gathered since the last reset: nothing to flush. */
	if (!tlb->end)
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}
412
413 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
414 struct page *page, int page_size)
415 {
416 if (__tlb_remove_page_size(tlb, page, page_size))
417 tlb_flush_mmu(tlb);
418 }
419
420 static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
421 {
422 return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
423 }
424
425
426
427
428
429 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
430 {
431 return tlb_remove_page_size(tlb, page, PAGE_SIZE);
432 }
433
/*
 * Record the page size of the entries about to be gathered. Where the
 * architecture tracks page size (CONFIG_HAVE_MMU_GATHER_PAGE_SIZE), a size
 * change forces a flush first so a single gather never mixes page sizes;
 * otherwise this is a no-op.
 */
static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		/* fullmm gathers flush everything at the end anyway */
		if (!tlb->fullmm)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
446
447 static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
448 {
449 if (tlb->cleared_ptes)
450 return PAGE_SHIFT;
451 if (tlb->cleared_pmds)
452 return PMD_SHIFT;
453 if (tlb->cleared_puds)
454 return PUD_SHIFT;
455 if (tlb->cleared_p4ds)
456 return P4D_SHIFT;
457
458 return PAGE_SHIFT;
459 }
460
/* Byte size corresponding to tlb_get_unmap_shift(). */
static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	unsigned long shift = tlb_get_unmap_shift(tlb);

	return 1UL << shift;
}
465
466
467
468
469
470
471 #ifndef tlb_start_vma
/*
 * Default per-VMA setup: record the vma's flush hints and write back the
 * cache for its range before its mappings are torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/* Full-mm teardown flushes everything at the end; skip per-VMA work. */
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
480 #endif
481
482 #ifndef tlb_end_vma
/*
 * Default per-VMA teardown: flush the range gathered for this VMA before
 * moving on to the next one.
 */
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * NOTE(review): flushing here (rather than deferring) keeps the
	 * vma_exec/vma_huge hints recorded by tlb_start_vma() consistent
	 * with the range being flushed — a deferred flush could span VMAs
	 * with different flags. Confirm against the default tlb_flush().
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
496 #endif
497
#ifndef __tlb_remove_tlb_entry
/* Arch hook: note that a PTE at @address was cleared; default is a no-op. */
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/*
 * tlb_remove_tlb_entry - remember that a PTE was unmapped: extend the
 * gathered range by one page, mark PTE granularity, and invoke the arch hook.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		tlb->cleared_ptes = 1; \
		__tlb_remove_tlb_entry(tlb, ptep, address); \
	} while (0)

/*
 * Hugetlb variant: extend the range by the hstate's page size and mark the
 * matching granularity (PMD- or PUD-sized huge pages).
 */
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do { \
		unsigned long _sz = huge_page_size(h); \
		__tlb_adjust_range(tlb, address, _sz); \
		if (_sz == PMD_SIZE) \
			tlb->cleared_pmds = 1; \
		else if (_sz == PUD_SIZE) \
			tlb->cleared_puds = 1; \
		__tlb_remove_tlb_entry(tlb, ptep, address); \
	} while (0)
526
527
528
529
530
#ifndef __tlb_remove_pmd_tlb_entry
/* Arch hook for a cleared huge PMD; default is a no-op. */
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

/* Record an unmapped transparent-huge-page PMD entry. */
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
	do { \
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
		tlb->cleared_pmds = 1; \
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
	} while (0)

#ifndef __tlb_remove_pud_tlb_entry
/* Arch hook for a cleared huge PUD; default is a no-op. */
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

/* Record an unmapped huge-PUD entry. */
#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
	do { \
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
		tlb->cleared_puds = 1; \
		__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
	} while (0)
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
#ifndef pte_free_tlb
/*
 * Free a PTE table page. Freeing the table clears the PMD entry that
 * pointed at it, hence cleared_pmds (one level up), plus freed_tables so
 * architectures that need it can do a deeper flush.
 */
#define pte_free_tlb(tlb, ptep, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		tlb->freed_tables = 1; \
		tlb->cleared_pmds = 1; \
		__pte_free_tlb(tlb, ptep, address); \
	} while (0)
#endif

#ifndef pmd_free_tlb
/* Free a PMD table page; clears the PUD entry above it (cleared_puds). */
#define pmd_free_tlb(tlb, pmdp, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		tlb->freed_tables = 1; \
		tlb->cleared_puds = 1; \
		__pmd_free_tlb(tlb, pmdp, address); \
	} while (0)
#endif
594
#ifndef __ARCH_HAS_4LEVEL_HACK
#ifndef pud_free_tlb
/* Free a PUD table page; clears the P4D entry above it (cleared_p4ds). */
#define pud_free_tlb(tlb, pudp, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		tlb->freed_tables = 1; \
		tlb->cleared_p4ds = 1; \
		__pud_free_tlb(tlb, pudp, address); \
	} while (0)
#endif
#endif

#ifndef __ARCH_HAS_5LEVEL_HACK
/*
 * Free a P4D table page. NOTE(review): the parameter is named 'pudp' but
 * holds a p4d pointer, and — unlike the other *_free_tlb() macros — no
 * cleared_* bit is set here (there is no cleared_pgds field to record the
 * level above); confirm this asymmetry is intentional.
 */
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		tlb->freed_tables = 1; \
		__p4d_free_tlb(tlb, pudp, address); \
	} while (0)
#endif
#endif
617
618 #endif
619
620 #endif