This source file includes the following definitions:
- __mmu_notifier_release
- __mmu_notifier_clear_flush_young
- __mmu_notifier_clear_young
- __mmu_notifier_test_young
- __mmu_notifier_change_pte
- __mmu_notifier_invalidate_range_start
- __mmu_notifier_invalidate_range_end
- __mmu_notifier_invalidate_range
- __mmu_notifier_register
- mmu_notifier_register
- find_get_mmu_notifier
- mmu_notifier_get_locked
- __mmu_notifier_mm_destroy
- mmu_notifier_unregister
- mmu_notifier_free_rcu
- mmu_notifier_put
- mmu_notifier_synchronize
- mmu_notifier_range_update_to_read_only
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any
		 * more sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning
	 * to exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one
	 * mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending if the mapping previously
 * existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier *mn;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = mn->ops->invalidate_range_start(mn, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					mn->ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ? "non-" : "");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range() here too, so that a subsystem that
		 * implements it does not also have to register an
		 * invalidate_range_end() callback.  Skip it when only_end is
		 * set: in that case the secondary TLBs were already flushed
		 * by an invalidate_range() call issued under the page table
		 * lock, and repeating it here would be redundant.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, range->mm,
						  range->start,
						  range->end);
		if (mn->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			mn->ops->invalidate_range_end(mn, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
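
The start/end pair above exists for consumers that mirror the CPU page tables. What follows is a minimal consumer-side sketch, not code from this file: the my_* names are hypothetical, and only the callback signatures and mmu_notifier_range_blockable() come from the kernel API. The usual pattern is a counter of open invalidation windows plus a sequence number, so that lookups racing with an invalidation can detect the race and retry.

/*
 * Hypothetical consumer-side sketch (my_* names are illustrative only):
 * count open start/end windows and bump a sequence number on each start.
 */
struct my_mirror {
	struct mmu_notifier mn;
	spinlock_t lock;
	unsigned long seq;		/* bumped on every invalidation */
	int active_invalidations;	/* open start/end windows */
};

static int my_invalidate_start(struct mmu_notifier *mn,
			       const struct mmu_notifier_range *range)
{
	struct my_mirror *m = container_of(mn, struct my_mirror, mn);

	/* A spinlock never sleeps, so this is safe even when
	 * !mmu_notifier_range_blockable(range). */
	spin_lock(&m->lock);
	m->active_invalidations++;
	m->seq++;
	spin_unlock(&m->lock);
	return 0;
}

static void my_invalidate_end(struct mmu_notifier *mn,
			      const struct mmu_notifier_range *range)
{
	struct my_mirror *m = container_of(mn, struct my_mirror, mn);

	spin_lock(&m->lock);
	m->active_invalidations--;
	spin_unlock(&m->lock);
}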

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register() but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct mmu_notifier_mm *mmu_notifier_mm = NULL;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
		fs_reclaim_release(GFP_KERNEL);
	}

	mn->mm = mm;
	mn->users = 1;

	if (!mm->mmu_notifier_mm) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->mmu_notifier_mm can't change while we hold
		 * the write side of the mmap_sem.
		 */
		mmu_notifier_mm =
			kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
		if (!mmu_notifier_mm)
			return -ENOMEM;

		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/* Pairs with the mmdrop() in mmu_notifier_unregister_* */
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister.  A side
	 * note: mmu_notifier_release can't run concurrently with us because
	 * we hold the mm_users pin (either implicitly as current->mm or
	 * explicitly with get_task_mm() or similar).  We can't race against
	 * any other mmu notifier method either thanks to
	 * mm_take_all_locks().
	 */
	if (mmu_notifier_mm)
		mm->mmu_notifier_mm = mmu_notifier_mm;

	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(mmu_notifier_mm);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @mn: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function.  Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm().  If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput() after
 * mmu_notifier_unregister() returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called
 * to unregister the notifier.
 *
 * While the caller holds a mmu_notifier, the mn->mm pointer remains
 * valid and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = __mmu_notifier_register(mn, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
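
To make the registration contract concrete, here is a hedged caller-side sketch, not code from this file: the my_* names are hypothetical, and only mmu_notifier_register(), the ops hooks, and the current->mm pinning rule come from the code and comments above.

/* Hypothetical registration sketch; my_* names are illustrative only. */
static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* Drop all secondary mappings; the address space is going away. */
}

static const struct mmu_notifier_ops my_ops = {
	.release = my_release,
};

static struct mmu_notifier my_mn;	/* static storage for brevity */

static int my_attach(void)
{
	my_mn.ops = &my_ops;
	/*
	 * current->mm implicitly pins mm_users, satisfying the rule in the
	 * comment above; for another task's mm, pin it with get_task_mm()
	 * first and mmput() after unregistering.
	 */
	return mmu_notifier_register(&my_mn, current->mm);
}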

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *mn;

	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops != ops)
			continue;

		if (likely(mn->users != UINT_MAX))
			mn->users++;
		else
			mn = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->mmu_notifier_mm->lock);
		return mn;
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm:  The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list.  The value of the ops pointer is used to determine when two
 * notifiers are 'the same'.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put().  The caller must hold the write side of
 * mm->mmap_sem.
 *
 * While the caller has a mmu_notifier get, the mm pointer will remain
 * valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);

	if (mm->mmu_notifier_mm) {
		mn = find_get_mmu_notifier(mm, ops);
		if (mn)
			return mn;
	}

	mn = ops->alloc_notifier(mm);
	if (IS_ERR(mn))
		return mn;
	mn->ops = ops;
	ret = __mmu_notifier_register(mn, mm);
	if (ret)
		goto out_free;
	return mn;
out_free:
	mn->ops->free_notifier(mn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);
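
A hedged sketch of the alloc/free side that mmu_notifier_get_locked() expects follows; the my_* names are hypothetical, and only the alloc_notifier/free_notifier hooks and the mmap_sem requirement come from the comment above.

/* Hypothetical alloc/free pair for the get/put flow (my_* illustrative). */
struct my_ctx {
	struct mmu_notifier mn;
	/* per-(mm, ops) driver state would live here */
};

static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return ERR_PTR(-ENOMEM);
	return &ctx->mn;
}

static void my_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct my_ctx, mn));
}

static const struct mmu_notifier_ops my_shared_ops = {
	.alloc_notifier	= my_alloc_notifier,
	.free_notifier	= my_free_notifier,
};

static struct mmu_notifier *my_get(struct mm_struct *mm)
{
	struct mmu_notifier *mn;

	down_write(&mm->mmap_sem);	/* required by mmu_notifier_get_locked() */
	mn = mmu_notifier_get_locked(&my_shared_ops, mm);
	up_write(&mm->mmap_sem);
	return mn;			/* release with mmu_notifier_put() */
}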

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it.  It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU.  All sptes must be dropped before
 * calling mmu_notifier_unregister.  ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * The notifier may still be visible to concurrent SRCU
		 * readers, so hlist_del_init_rcu() is the only safe way
		 * to unlink it.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *mn = container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = mn->mm;

	mn->ops->free_notifier(mn);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @mn: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(); it releases
 * the reference obtained by the get.  If this is the last reference then
 * the process to free the notifier will be started.
 *
 * Unlike mmu_notifier_unregister(), the get/put flow only calls
 * ops->release when the mm_struct is destroyed.  Instead free_notifier
 * is always called to release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure
 * that all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however
 * the caller must still ensure it is called pairwise with
 * mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *mn)
{
	struct mm_struct *mm = mn->mm;

	spin_lock(&mm->mmu_notifier_mm->lock);
	if (WARN_ON(!mn->users) || --mn->users)
		goto out_unlock;
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	call_srcu(&srcu, &mn->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->mmu_notifier_mm->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);
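
Following the rule in the comment above, here is a hedged sketch of the matching module teardown; my_put and my_exit are hypothetical names, and the only kernel calls used are mmu_notifier_put() and mmu_notifier_synchronize() as documented here.

/* Hypothetical teardown for the get/put flow sketched earlier. */
static void my_put(struct mmu_notifier *mn)
{
	mmu_notifier_put(mn);	/* free_notifier may run later via SRCU */
}

static void __exit my_exit(void)
{
	/*
	 * Flush the deferred mmu_notifier_free_rcu() work so that
	 * my_free_notifier() cannot run after the module text is gone.
	 */
	mmu_notifier_synchronize();
}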

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed.  After it returns, any
 * mmu_notifier_ops associated with an unused mmu_notifier will no
 * longer be called and can be destroyed.
 *
 * Modules using the mmu_notifier_put() API should call this in their
 * __exit function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;

	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);
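
Finally, a hedged sketch of how a notifier callback might consume the helper above to avoid needless teardown when a VMA is merely losing write permission; the my_* names are hypothetical.

/* Hypothetical use of mmu_notifier_range_update_to_read_only(). */
static int my_ro_aware_start(struct mmu_notifier *mn,
			     const struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_update_to_read_only(range)) {
		/*
		 * The VMA is only being write-protected: a driver could
		 * demote its mappings to read-only instead of dropping them.
		 */
		return 0;
	}
	/* A real driver would invalidate [range->start, range->end) here. */
	return 0;
}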