This source file includes the following definitions.
- mm_has_notifiers
- mmu_notifier_get
- mmu_notifier_range_blockable
- mmu_notifier_release
- mmu_notifier_clear_flush_young
- mmu_notifier_clear_young
- mmu_notifier_test_young
- mmu_notifier_change_pte
- mmu_notifier_invalidate_range_start
- mmu_notifier_invalidate_range_start_nonblock
- mmu_notifier_invalidate_range_end
- mmu_notifier_invalidate_range_only_end
- mmu_notifier_invalidate_range
- mmu_notifier_mm_init
- mmu_notifier_mm_destroy
- mmu_notifier_range_init
- _mmu_notifier_range_init
- mmu_notifier_range_blockable
- mm_has_notifiers
- mmu_notifier_release
- mmu_notifier_clear_flush_young
- mmu_notifier_test_young
- mmu_notifier_change_pte
- mmu_notifier_invalidate_range_start
- mmu_notifier_invalidate_range_start_nonblock
- mmu_notifier_invalidate_range_end
- mmu_notifier_invalidate_range_only_end
- mmu_notifier_invalidate_range
- mmu_notifier_mm_init
- mmu_notifier_mm_destroy
- mmu_notifier_synchronize
1
2 #ifndef _LINUX_MMU_NOTIFIER_H
3 #define _LINUX_MMU_NOTIFIER_H
4
5 #include <linux/list.h>
6 #include <linux/spinlock.h>
7 #include <linux/mm_types.h>
8 #include <linux/srcu.h>
9
10 struct mmu_notifier;
11 struct mmu_notifier_ops;
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
/*
 * enum mmu_notifier_event - kind of change being reported for an address range
 *
 * @MMU_NOTIFY_UNMAP: the range is going away for good (e.g. munmap or
 * process exit) — page table entries will not be re-established.
 * @MMU_NOTIFY_CLEAR: page table entries in the range are being cleared,
 * but the VMA itself stays valid.
 * @MMU_NOTIFY_PROTECTION_VMA: access protection is changing for the whole
 * VMA covering the range.
 * @MMU_NOTIFY_PROTECTION_PAGE: access protection is changing for an
 * individual page within the range.
 * @MMU_NOTIFY_SOFT_DIRTY: soft-dirty tracking is write-protecting the range.
 *
 * NOTE(review): descriptions inferred from the event names and upstream
 * mmu-notifier documentation — confirm against Documentation/vm/mmu_notifier.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
};
42
43 #ifdef CONFIG_MMU_NOTIFIER
44
45 #ifdef CONFIG_LOCKDEP
46 extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
47 #endif
48
49
50
51
52
53
54
/*
 * Per-mm notifier state, hung off mm_struct::mmu_notifier_mm (see
 * mm_has_notifiers()).  Allocated only once a notifier registers.
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered on this mm */
	struct hlist_head list;
	/* presumably serializes updates to @list — confirm against mmu_notifier.c */
	spinlock_t lock;
};

/* Range flag: the invalidation callbacks are allowed to block/sleep. */
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
63
/*
 * Describes a virtual address range whose mappings are being changed,
 * passed to the invalidate_range_start/end callbacks.
 */
struct mmu_notifier_range {
	struct vm_area_struct *vma;	/* VMA covering [start, end) */
	struct mm_struct *mm;		/* address space being modified */
	unsigned long start;		/* first address of the range */
	unsigned long end;		/* one past the last address */
	unsigned flags;			/* MMU_NOTIFIER_RANGE_* bits */
	enum mmu_notifier_event event;	/* what kind of change this is */
};
72
/*
 * Callback table a subsystem supplies when registering an mmu notifier.
 * All callbacks are optional unless noted otherwise.
 *
 * NOTE(review): comments below summarize the upstream contract documented
 * in Documentation/vm/mmu_notifier — verify against the registration code.
 */
struct mmu_notifier_ops {
	/*
	 * release() is called when the mm is torn down (all pages about to
	 * be freed); after it returns the notifier must not touch the mm.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * clear_flush_young() mirrors ptep_clear_flush_young(): clear the
	 * accessed/young state for the range and return non-zero if any
	 * secondary TLB reported the pages as accessed.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young() is like clear_flush_young() but without the
	 * obligation to flush the secondary TLB.
	 */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young() reports (non-zero) whether @address was accessed
	 * through a secondary mapping, without clearing that state.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte() notifies that the pte for @address is being replaced
	 * by @pte (see set_pte_at_notify(), which calls this before the
	 * primary pte is written).
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start()/invalidate_range_end() bracket a page
	 * table update affecting *range.  start() may return an error only
	 * when the range is non-blockable (see
	 * mmu_notifier_range_blockable()); end() is always paired with a
	 * successful start().
	 */
	int (*invalidate_range_start)(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range);

	/*
	 * invalidate_range() flushes [start, end) from any secondary TLB;
	 * called under the page table lock, so it must not sleep —
	 * TODO confirm locking context against callers of
	 * mmu_notifier_invalidate_range().
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);

	/*
	 * alloc_notifier()/free_notifier() support the get/put lifetime
	 * model (mmu_notifier_get()/mmu_notifier_put()): allocate one
	 * notifier per mm, and free it once the last reference is dropped
	 * (freeing may be RCU-deferred via mmu_notifier::rcu — verify).
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *mn);
};
232
233
234
235
236
237
238
239
240
241
242
243
/*
 * One registered notifier instance, linked into the per-mm
 * mmu_notifier_mm::list.
 */
struct mmu_notifier {
	struct hlist_node hlist;	/* entry in mmu_notifier_mm::list */
	const struct mmu_notifier_ops *ops;	/* subsystem callbacks */
	struct mm_struct *mm;		/* the mm this notifier watches */
	struct rcu_head rcu;		/* presumably for deferred freeing — confirm */
	unsigned int users;		/* refcount for mmu_notifier_get()/put() */
};
251
252 static inline int mm_has_notifiers(struct mm_struct *mm)
253 {
254 return unlikely(mm->mmu_notifier_mm);
255 }
256
257 struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
258 struct mm_struct *mm);
259 static inline struct mmu_notifier *
260 mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
261 {
262 struct mmu_notifier *ret;
263
264 down_write(&mm->mmap_sem);
265 ret = mmu_notifier_get_locked(ops, mm);
266 up_write(&mm->mmap_sem);
267 return ret;
268 }
269 void mmu_notifier_put(struct mmu_notifier *mn);
270 void mmu_notifier_synchronize(void);
271
272 extern int mmu_notifier_register(struct mmu_notifier *mn,
273 struct mm_struct *mm);
274 extern int __mmu_notifier_register(struct mmu_notifier *mn,
275 struct mm_struct *mm);
276 extern void mmu_notifier_unregister(struct mmu_notifier *mn,
277 struct mm_struct *mm);
278 extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
279 extern void __mmu_notifier_release(struct mm_struct *mm);
280 extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
281 unsigned long start,
282 unsigned long end);
283 extern int __mmu_notifier_clear_young(struct mm_struct *mm,
284 unsigned long start,
285 unsigned long end);
286 extern int __mmu_notifier_test_young(struct mm_struct *mm,
287 unsigned long address);
288 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
289 unsigned long address, pte_t pte);
290 extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
291 extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
292 bool only_end);
293 extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
294 unsigned long start, unsigned long end);
295 extern bool
296 mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
297
298 static inline bool
299 mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
300 {
301 return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
302 }
303
/* Run the release notifiers for @mm, if any are registered. */
static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (!mm_has_notifiers(mm))
		return;
	__mmu_notifier_release(mm);
}
309
/*
 * Clear+flush the accessed state for [start, end) in all secondary TLBs;
 * returns non-zero if any notifier saw the pages as young.  No-op
 * returning 0 when @mm has no notifiers.
 */
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	if (!mm_has_notifiers(mm))
		return 0;
	return __mmu_notifier_clear_flush_young(mm, start, end);
}
318
/*
 * Like mmu_notifier_clear_flush_young() but without forcing a flush of
 * the secondary TLBs.  Returns 0 when @mm has no notifiers.
 */
static inline int mmu_notifier_clear_young(struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end)
{
	if (!mm_has_notifiers(mm))
		return 0;
	return __mmu_notifier_clear_young(mm, start, end);
}
327
/*
 * Query (without clearing) whether @address is young in any secondary
 * mapping.  Returns 0 when @mm has no notifiers.
 */
static inline int mmu_notifier_test_young(struct mm_struct *mm,
				   unsigned long address)
{
	if (!mm_has_notifiers(mm))
		return 0;
	return __mmu_notifier_test_young(mm, address);
}
335
336 static inline void mmu_notifier_change_pte(struct mm_struct *mm,
337 unsigned long address, pte_t pte)
338 {
339 if (mm_has_notifiers(mm))
340 __mmu_notifier_change_pte(mm, address, pte);
341 }
342
/*
 * Begin an invalidation of @range in a context where sleeping is allowed:
 * the BLOCKABLE flag is set before invoking the notifiers.  The lockdep
 * map acquire/release pair brackets the notifier call so lockdep can catch
 * callbacks that take locks incompatible with this path — the exact
 * acquire-before / release-after ordering is load-bearing, do not reorder.
 */
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();	/* callers must be in sleepable context */

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
355
/*
 * Non-blocking variant of mmu_notifier_invalidate_range_start(): the
 * BLOCKABLE flag is cleared, so callbacks must not sleep and may fail.
 * Returns 0 on success or the error from the notifier chain.
 */
static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		/* callbacks on this path are forbidden from blocking */
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}
369
370 static inline void
371 mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
372 {
373 if (mmu_notifier_range_blockable(range))
374 might_sleep();
375
376 if (mm_has_notifiers(range->mm))
377 __mmu_notifier_invalidate_range_end(range, false);
378 }
379
380 static inline void
381 mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
382 {
383 if (mm_has_notifiers(range->mm))
384 __mmu_notifier_invalidate_range_end(range, true);
385 }
386
/* Flush [start, end) from the secondary TLBs of @mm, if any exist. */
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	if (!mm_has_notifiers(mm))
		return;
	__mmu_notifier_invalidate_range(mm, start, end);
}
393
/*
 * Reset the per-mm notifier state to "none registered" — presumably
 * invoked when a new mm is set up.
 */
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}
398
/* Tear down the per-mm notifier state, if any was ever allocated. */
static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (!mm_has_notifiers(mm))
		return;
	__mmu_notifier_mm_destroy(mm);
}
404
405
406 static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
407 enum mmu_notifier_event event,
408 unsigned flags,
409 struct vm_area_struct *vma,
410 struct mm_struct *mm,
411 unsigned long start,
412 unsigned long end)
413 {
414 range->vma = vma;
415 range->event = event;
416 range->mm = mm;
417 range->start = start;
418 range->end = end;
419 range->flags = flags;
420 }
421
/*
 * ptep_clear_flush_young() for the primary pte, OR-ed with the notifier
 * result for the same PAGE_SIZE range, so "young" in any secondary TLB
 * is reported too.  Arguments are evaluated once.
 */
#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})
434
/*
 * PMD-sized counterpart of ptep_clear_flush_young_notify(): clears and
 * flushes young for the huge-pmd entry, then merges in the notifier
 * result over a PMD_SIZE range.
 */
#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})
447
/*
 * Non-flushing young-clear for a pte: ptep_test_and_clear_young() plus
 * mmu_notifier_clear_young() over the same page, results OR-ed.
 */
#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})
458
/*
 * Non-flushing young-clear for a huge pmd, merged with the notifier
 * result over a PMD_SIZE range.
 */
#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})
469
/*
 * ptep_clear_flush() followed by an invalidate_range notification for
 * the containing page, so secondary TLBs drop the stale translation.
 * Evaluates to the old pte value.
 */
#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})
482
/*
 * Huge-pmd variant of ptep_clear_flush_notify(): clears+flushes the pmd
 * and notifies an invalidate over the HPAGE_PMD_SIZE-aligned range.
 * Evaluates to the old pmd value.
 */
#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})
495
/*
 * Huge-pud variant: clears+flushes the pud and notifies an invalidate
 * over the HPAGE_PUD_SIZE-aligned range.  Evaluates to the old pud.
 */
#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})
508
/*
 * set_pte_at_notify() invokes the change_pte notifier *before* writing
 * the new pte with set_pte_at() — that ordering is visible in the macro
 * body and must be preserved so secondary MMUs see the replacement
 * before the primary mapping changes.  Arguments are evaluated once.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
528
529 #else
530
/*
 * CONFIG_MMU_NOTIFIER=n stubs: keep callers compiling while doing as
 * little work as possible.  The range only needs to carry the span.
 */
struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

/* Record just the span; there are no notifiers to inform. */
static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

/* Drop event/flags/vma/mm, which are meaningless without notifiers. */
#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)  \
	_mmu_notifier_range_init(range, start, end)
546
/* Without notifier support every range is trivially blockable. */
static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

/* No notifier support compiled in, so no mm ever has notifiers. */
static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

/* No-op: nothing registered, nothing to release. */
static inline void mmu_notifier_release(struct mm_struct *mm)
{
}
561
/* Redundant forward declaration keeps these stubs self-contained. */
struct mm_struct;

/* No secondary TLBs: nothing was young, nothing to clear or flush. */
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	return 0;
}

/*
 * Stub added for parity with the CONFIG_MMU_NOTIFIER=y branch, which
 * provides mmu_notifier_clear_young(); without it, direct callers fail
 * to compile when notifiers are disabled.
 */
static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	return 0;
}

/* No secondary mappings, so no page can be reported young. */
static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}
574
/* No secondary MMUs to tell about a pte replacement. */
static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

/* Invalidation begin/end collapse to no-ops. */
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

/* Always succeeds: there is nothing that could refuse the invalidation. */
static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

/* Per-mm notifier state does not exist; init/destroy are no-ops. */
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}
613
/* Without notifiers a range is never converted to read-only tracking. */
#define mmu_notifier_range_update_to_read_only(r) false

/* The *_notify wrappers collapse to their plain page-table operations. */
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

/* Nothing outstanding to wait for when notifiers are compiled out. */
static inline void mmu_notifier_synchronize(void)
{
}
628
629 #endif
630
631 #endif