This source file includes the following definitions:
- anon_vma_alloc
- anon_vma_free
- anon_vma_chain_alloc
- anon_vma_chain_free
- anon_vma_chain_link
- __anon_vma_prepare
- lock_anon_vma_root
- unlock_anon_vma_root
- anon_vma_clone
- anon_vma_fork
- unlink_anon_vmas
- anon_vma_ctor
- anon_vma_init
- page_get_anon_vma
- page_lock_anon_vma_read
- page_unlock_anon_vma_read
- try_to_unmap_flush
- try_to_unmap_flush_dirty
- set_tlb_ubc_flush_pending
- should_defer_flush
- flush_tlb_batched_pending
- set_tlb_ubc_flush_pending
- should_defer_flush
- page_address_in_vma
- mm_find_pmd
- page_referenced_one
- invalid_page_referenced_vma
- page_referenced
- page_mkclean_one
- invalid_mkclean_vma
- page_mkclean
- page_move_anon_rmap
- __page_set_anon_rmap
- __page_check_anon_rmap
- page_add_anon_rmap
- do_page_add_anon_rmap
- page_add_new_anon_rmap
- page_add_file_rmap
- page_remove_file_rmap
- page_remove_anon_compound_rmap
- page_remove_rmap
- try_to_unmap_one
- is_vma_temporary_stack
- invalid_migration_vma
- page_mapcount_is_zero
- try_to_unmap
- page_not_mapped
- try_to_munlock
- __put_anon_vma
- rmap_walk_anon_lock
- rmap_walk_anon
- rmap_walk_file
- rmap_walk
- rmap_walk_locked
- hugepage_add_anon_rmap
- hugepage_add_new_anon_rmap
48 #include <linux/mm.h>
49 #include <linux/sched/mm.h>
50 #include <linux/sched/task.h>
51 #include <linux/pagemap.h>
52 #include <linux/swap.h>
53 #include <linux/swapops.h>
54 #include <linux/slab.h>
55 #include <linux/init.h>
56 #include <linux/ksm.h>
57 #include <linux/rmap.h>
58 #include <linux/rcupdate.h>
59 #include <linux/export.h>
60 #include <linux/memcontrol.h>
61 #include <linux/mmu_notifier.h>
62 #include <linux/migrate.h>
63 #include <linux/hugetlb.h>
64 #include <linux/huge_mm.h>
65 #include <linux/backing-dev.h>
66 #include <linux/page_idle.h>
67 #include <linux/memremap.h>
68 #include <linux/userfaultfd_k.h>
69
70 #include <asm/tlbflush.h>
71
72 #include <trace/events/tlb.h>
73
74 #include "internal.h"
75
76 static struct kmem_cache *anon_vma_cachep;
77 static struct kmem_cache *anon_vma_chain_cachep;
78
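/*
 * Allocate an anon_vma from its slab cache. The new anon_vma starts with a
 * single reference and, until anon_vma_fork() overrides it, is its own root
 * and its own parent.
 */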
79 static inline struct anon_vma *anon_vma_alloc(void)
80 {
81 struct anon_vma *anon_vma;
82
83 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
84 if (anon_vma) {
85 atomic_set(&anon_vma->refcount, 1);
86 anon_vma->degree = 1;
87 anon_vma->parent = anon_vma;
/*
 * Initialise the anon_vma root to point to itself. If called
 * from fork, the root will be reset to the parent's anon_vma
 * (see anon_vma_fork()).
 */
92 anon_vma->root = anon_vma;
93 }
94
95 return anon_vma;
96 }
97
98 static inline void anon_vma_free(struct anon_vma *anon_vma)
99 {
100 VM_BUG_ON(atomic_read(&anon_vma->refcount));
/*
 * Synchronize against page_lock_anon_vma_read() such that
 * we can safely hold the lock without the anon_vma getting
 * freed.
 *
 * Relies on the full mb implied by the atomic_dec_and_test() in
 * put_anon_vma() against the acquire barrier implied by
 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
 *
 * page_lock_anon_vma_read()	VS	put_anon_vma()
 *   down_read_trylock()		  atomic_dec_and_test()
 *   LOCK				  MB
 *   atomic_read()			  rwsem_is_locked()
 *
 * LOCK should suffice since the actual taking of the lock must
 * happen _before_ what follows.
 */
119 might_sleep();
120 if (rwsem_is_locked(&anon_vma->root->rwsem)) {
121 anon_vma_lock_write(anon_vma);
122 anon_vma_unlock_write(anon_vma);
123 }
124
125 kmem_cache_free(anon_vma_cachep, anon_vma);
126 }
127
128 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
129 {
130 return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
131 }
132
133 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
134 {
135 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
136 }
137
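/*
 * Link an anon_vma_chain both onto the vma's anon_vma_chain list and into
 * the anon_vma's interval tree, so the mapping can be found from either
 * direction of the reverse map.
 */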
138 static void anon_vma_chain_link(struct vm_area_struct *vma,
139 struct anon_vma_chain *avc,
140 struct anon_vma *anon_vma)
141 {
142 avc->vma = vma;
143 avc->anon_vma = anon_vma;
144 list_add(&avc->same_vma, &vma->anon_vma_chain);
145 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
146 }
147
/*
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
176 int __anon_vma_prepare(struct vm_area_struct *vma)
177 {
178 struct mm_struct *mm = vma->vm_mm;
179 struct anon_vma *anon_vma, *allocated;
180 struct anon_vma_chain *avc;
181
182 might_sleep();
183
184 avc = anon_vma_chain_alloc(GFP_KERNEL);
185 if (!avc)
186 goto out_enomem;
187
188 anon_vma = find_mergeable_anon_vma(vma);
189 allocated = NULL;
190 if (!anon_vma) {
191 anon_vma = anon_vma_alloc();
192 if (unlikely(!anon_vma))
193 goto out_enomem_free_avc;
194 allocated = anon_vma;
195 }
196
197 anon_vma_lock_write(anon_vma);
198
199 spin_lock(&mm->page_table_lock);
200 if (likely(!vma->anon_vma)) {
201 vma->anon_vma = anon_vma;
202 anon_vma_chain_link(vma, avc, anon_vma);
203
204 anon_vma->degree++;
205 allocated = NULL;
206 avc = NULL;
207 }
208 spin_unlock(&mm->page_table_lock);
209 anon_vma_unlock_write(anon_vma);
210
211 if (unlikely(allocated))
212 put_anon_vma(allocated);
213 if (unlikely(avc))
214 anon_vma_chain_free(avc);
215
216 return 0;
217
218 out_enomem_free_avc:
219 anon_vma_chain_free(avc);
220 out_enomem:
221 return -ENOMEM;
222 }
223
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single lock acquisition for the whole traversal.
 */
232 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
233 {
234 struct anon_vma *new_root = anon_vma->root;
235 if (new_root != root) {
236 if (WARN_ON_ONCE(root))
237 up_write(&root->rwsem);
238 root = new_root;
239 down_write(&root->rwsem);
240 }
241 return root;
242 }
243
244 static inline void unlock_anon_vma_root(struct anon_vma *root)
245 {
246 if (root)
247 up_write(&root->rwsem);
248 }
249
/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_fork() calls this with dst->anon_vma set to NULL, which allows
 * an existing anon_vma to be reused if it has no vmas and only one child
 * anon_vma (degree < 2). Reusing such an anon_vma prevents the anon_vma
 * hierarchy from degrading into an endless linear chain when a task forks
 * repeatedly. Other callers want an exact copy of src and already have
 * dst->anon_vma set, so no reuse happens for them.
 */
262 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
263 {
264 struct anon_vma_chain *avc, *pavc;
265 struct anon_vma *root = NULL;
266
267 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
268 struct anon_vma *anon_vma;
269
270 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
271 if (unlikely(!avc)) {
272 unlock_anon_vma_root(root);
273 root = NULL;
274 avc = anon_vma_chain_alloc(GFP_KERNEL);
275 if (!avc)
276 goto enomem_failure;
277 }
278 anon_vma = pavc->anon_vma;
279 root = lock_anon_vma_root(root, anon_vma);
280 anon_vma_chain_link(dst, avc, anon_vma);
/*
 * Reuse an existing anon_vma if its degree is lower than two,
 * which means it has no vma and only one anon_vma child.
 *
 * Do not choose the parent anon_vma, otherwise the first child
 * will always reuse it. The root anon_vma is never reused:
 * it has a self-parent reference and at least one child.
 */
290 if (!dst->anon_vma && anon_vma != src->anon_vma &&
291 anon_vma->degree < 2)
292 dst->anon_vma = anon_vma;
293 }
294 if (dst->anon_vma)
295 dst->anon_vma->degree++;
296 unlock_anon_vma_root(root);
297 return 0;
298
299 enomem_failure:
300
301
302
303
304
305
306 dst->anon_vma = NULL;
307 unlink_anon_vmas(dst);
308 return -ENOMEM;
309 }
310
/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
316 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
317 {
318 struct anon_vma_chain *avc;
319 struct anon_vma *anon_vma;
320 int error;
321
322
323 if (!pvma->anon_vma)
324 return 0;
325
326
327 vma->anon_vma = NULL;
328
329
330
331
332
333 error = anon_vma_clone(vma, pvma);
334 if (error)
335 return error;
336
337
338 if (vma->anon_vma)
339 return 0;
340
341
342 anon_vma = anon_vma_alloc();
343 if (!anon_vma)
344 goto out_error;
345 avc = anon_vma_chain_alloc(GFP_KERNEL);
346 if (!avc)
347 goto out_error_free_anon_vma;
348
349
350
351
352
353 anon_vma->root = pvma->anon_vma->root;
354 anon_vma->parent = pvma->anon_vma;
355
356
357
358
359
360 get_anon_vma(anon_vma->root);
361
362 vma->anon_vma = anon_vma;
363 anon_vma_lock_write(anon_vma);
364 anon_vma_chain_link(vma, avc, anon_vma);
365 anon_vma->parent->degree++;
366 anon_vma_unlock_write(anon_vma);
367
368 return 0;
369
370 out_error_free_anon_vma:
371 put_anon_vma(anon_vma);
372 out_error:
373 unlink_anon_vmas(vma);
374 return -ENOMEM;
375 }
376
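/*
 * Unlink the anon_vma_chains of a vma and drop the references they hold.
 * Empty anon_vmas are left on the list during the first pass (while the
 * root lock is held) and freed in a second pass, since __put_anon_vma()
 * may need to take the root rwsem itself.
 */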
377 void unlink_anon_vmas(struct vm_area_struct *vma)
378 {
379 struct anon_vma_chain *avc, *next;
380 struct anon_vma *root = NULL;
381
382
383
384
385
386 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
387 struct anon_vma *anon_vma = avc->anon_vma;
388
389 root = lock_anon_vma_root(root, anon_vma);
390 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
391
392
393
394
395
396 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
397 anon_vma->parent->degree--;
398 continue;
399 }
400
401 list_del(&avc->same_vma);
402 anon_vma_chain_free(avc);
403 }
404 if (vma->anon_vma)
405 vma->anon_vma->degree--;
406 unlock_anon_vma_root(root);
407
408
409
410
411
412
413 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
414 struct anon_vma *anon_vma = avc->anon_vma;
415
416 VM_WARN_ON(anon_vma->degree);
417 put_anon_vma(anon_vma);
418
419 list_del(&avc->same_vma);
420 anon_vma_chain_free(avc);
421 }
422 }
423
424 static void anon_vma_ctor(void *data)
425 {
426 struct anon_vma *anon_vma = data;
427
428 init_rwsem(&anon_vma->rwsem);
429 atomic_set(&anon_vma->refcount, 0);
430 anon_vma->rb_root = RB_ROOT_CACHED;
431 }
432
433 void __init anon_vma_init(void)
434 {
435 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
436 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
437 anon_vma_ctor);
438 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
439 SLAB_PANIC|SLAB_ACCOUNT);
440 }
441
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap(),
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma, or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * guarantee that any anon_vma obtained from the page will still be valid for
 * as long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it.
 *
 * Since the anon_vma slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
465 struct anon_vma *page_get_anon_vma(struct page *page)
466 {
467 struct anon_vma *anon_vma = NULL;
468 unsigned long anon_mapping;
469
470 rcu_read_lock();
471 anon_mapping = (unsigned long)READ_ONCE(page->mapping);
472 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
473 goto out;
474 if (!page_mapped(page))
475 goto out;
476
477 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
478 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
479 anon_vma = NULL;
480 goto out;
481 }
482
483
484
485
486
487
488
489
490 if (!page_mapped(page)) {
491 rcu_read_unlock();
492 put_anon_vma(anon_vma);
493 return NULL;
494 }
495 out:
496 rcu_read_unlock();
497
498 return anon_vma;
499 }
500
/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the rwsem.
 */
508 struct anon_vma *page_lock_anon_vma_read(struct page *page)
509 {
510 struct anon_vma *anon_vma = NULL;
511 struct anon_vma *root_anon_vma;
512 unsigned long anon_mapping;
513
514 rcu_read_lock();
515 anon_mapping = (unsigned long)READ_ONCE(page->mapping);
516 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
517 goto out;
518 if (!page_mapped(page))
519 goto out;
520
521 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
522 root_anon_vma = READ_ONCE(anon_vma->root);
523 if (down_read_trylock(&root_anon_vma->rwsem)) {
524
525
526
527
528
529 if (!page_mapped(page)) {
530 up_read(&root_anon_vma->rwsem);
531 anon_vma = NULL;
532 }
533 goto out;
534 }
535
536
537 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
538 anon_vma = NULL;
539 goto out;
540 }
541
542 if (!page_mapped(page)) {
543 rcu_read_unlock();
544 put_anon_vma(anon_vma);
545 return NULL;
546 }
547
548
549 rcu_read_unlock();
550 anon_vma_lock_read(anon_vma);
551
552 if (atomic_dec_and_test(&anon_vma->refcount)) {
553
554
555
556
557
558 anon_vma_unlock_read(anon_vma);
559 __put_anon_vma(anon_vma);
560 anon_vma = NULL;
561 }
562
563 return anon_vma;
564
565 out:
566 rcu_read_unlock();
567 return anon_vma;
568 }
569
570 void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
571 {
572 anon_vma_unlock_read(anon_vma);
573 }
574
575 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly
 * it must be flushed before freeing to prevent data leakage.
 */
582 void try_to_unmap_flush(void)
583 {
584 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
585
586 if (!tlb_ubc->flush_required)
587 return;
588
589 arch_tlbbatch_flush(&tlb_ubc->arch);
590 tlb_ubc->flush_required = false;
591 tlb_ubc->writable = false;
592 }
593
594
595 void try_to_unmap_flush_dirty(void)
596 {
597 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
598
599 if (tlb_ubc->writable)
600 try_to_unmap_flush();
601 }
602
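/*
 * Queue the mm for a deferred (batched) TLB flush instead of flushing per
 * unmapped pte. The actual flush happens later in try_to_unmap_flush().
 */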
603 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
604 {
605 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
606
607 arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
608 tlb_ubc->flush_required = true;
609
610
611
612
613
614 barrier();
615 mm->tlb_flush_batched = true;
616
617
618
619
620
621
622 if (writable)
623 tlb_ubc->writable = true;
624 }
625
/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
630 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
631 {
632 bool should_defer = false;
633
634 if (!(flags & TTU_BATCH_FLUSH))
635 return false;
636
637
638 if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
639 should_defer = true;
640 put_cpu();
641
642 return should_defer;
643 }
644
/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim, so instead track
 * whether TLB batching occurred in the past and if so do a flush here
 * if required. This will cost one additional flush per reclaim cycle, paid
 * by the first operation at risk such as mprotect or munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that races with a parallel mprotect/munmap is serialised by the PTL.
 */
660 void flush_tlb_batched_pending(struct mm_struct *mm)
661 {
662 if (mm->tlb_flush_batched) {
663 flush_tlb_mm(mm);
664
665
666
667
668
669 barrier();
670 mm->tlb_flush_batched = false;
671 }
672 }
673 #else
674 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
675 {
676 }
677
678 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
679 {
680 return false;
681 }
682 #endif
683
/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
688 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
689 {
690 unsigned long address;
691 if (PageAnon(page)) {
692 struct anon_vma *page__anon_vma = page_anon_vma(page);
693
694
695
696
697 if (!vma->anon_vma || !page__anon_vma ||
698 vma->anon_vma->root != page__anon_vma->root)
699 return -EFAULT;
700 } else if (page->mapping) {
701 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
702 return -EFAULT;
703 } else
704 return -EFAULT;
705 address = __vma_address(page, vma);
706 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
707 return -EFAULT;
708 return address;
709 }
710
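/*
 * Walk the page tables of @mm and return the pmd covering @address, or NULL
 * if no pmd is present or if the pmd is a transparent huge pmd (callers want
 * a pmd they can map ptes from).
 */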
711 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
712 {
713 pgd_t *pgd;
714 p4d_t *p4d;
715 pud_t *pud;
716 pmd_t *pmd = NULL;
717 pmd_t pmde;
718
719 pgd = pgd_offset(mm, address);
720 if (!pgd_present(*pgd))
721 goto out;
722
723 p4d = p4d_offset(pgd, address);
724 if (!p4d_present(*p4d))
725 goto out;
726
727 pud = pud_offset(p4d, address);
728 if (!pud_present(*pud))
729 goto out;
730
731 pmd = pmd_offset(pud, address);
732
733
734
735
736
737 pmde = *pmd;
738 barrier();
739 if (!pmd_present(pmde) || pmd_trans_huge(pmde))
740 pmd = NULL;
741 out:
742 return pmd;
743 }
744
745 struct page_referenced_arg {
746 int mapcount;
747 int referenced;
748 unsigned long vm_flags;
749 struct mem_cgroup *memcg;
750 };
751
752
753
754 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
755 unsigned long address, void *arg)
756 {
757 struct page_referenced_arg *pra = arg;
758 struct page_vma_mapped_walk pvmw = {
759 .page = page,
760 .vma = vma,
761 .address = address,
762 };
763 int referenced = 0;
764
765 while (page_vma_mapped_walk(&pvmw)) {
766 address = pvmw.address;
767
768 if (vma->vm_flags & VM_LOCKED) {
769 page_vma_mapped_walk_done(&pvmw);
770 pra->vm_flags |= VM_LOCKED;
771 return false;
772 }
773
774 if (pvmw.pte) {
775 if (ptep_clear_flush_young_notify(vma, address,
776 pvmw.pte)) {
777
778
779
780
781
782
783
784
785 if (likely(!(vma->vm_flags & VM_SEQ_READ)))
786 referenced++;
787 }
788 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
789 if (pmdp_clear_flush_young_notify(vma, address,
790 pvmw.pmd))
791 referenced++;
792 } else {
793
794 WARN_ON_ONCE(1);
795 }
796
797 pra->mapcount--;
798 }
799
800 if (referenced)
801 clear_page_idle(page);
802 if (test_and_clear_page_young(page))
803 referenced++;
804
805 if (referenced) {
806 pra->referenced++;
807 pra->vm_flags |= vma->vm_flags;
808 }
809
810 if (!pra->mapcount)
811 return false;
812
813 return true;
814 }
815
816 static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
817 {
818 struct page_referenced_arg *pra = arg;
819 struct mem_cgroup *memcg = pra->memcg;
820
821 if (!mm_match_cgroup(vma->vm_mm, memcg))
822 return true;
823
824 return false;
825 }
826
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: OR of the vm_flags of the vmas which referenced the page
 *
 * Quick test_and_clear_referenced for all mappings of a page,
 * returns the number of ptes which referenced the page.
 */
837 int page_referenced(struct page *page,
838 int is_locked,
839 struct mem_cgroup *memcg,
840 unsigned long *vm_flags)
841 {
842 int we_locked = 0;
843 struct page_referenced_arg pra = {
844 .mapcount = total_mapcount(page),
845 .memcg = memcg,
846 };
847 struct rmap_walk_control rwc = {
848 .rmap_one = page_referenced_one,
849 .arg = (void *)&pra,
850 .anon_lock = page_lock_anon_vma_read,
851 };
852
853 *vm_flags = 0;
854 if (!pra.mapcount)
855 return 0;
856
857 if (!page_rmapping(page))
858 return 0;
859
860 if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
861 we_locked = trylock_page(page);
862 if (!we_locked)
863 return 1;
864 }
865
866
867
868
869
870
871 if (memcg) {
872 rwc.invalid_vma = invalid_page_referenced_vma;
873 }
874
875 rmap_walk(page, &rwc);
876 *vm_flags = pra.vm_flags;
877
878 if (we_locked)
879 unlock_page(page);
880
881 return pra.referenced;
882 }
883
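/*
 * Write-protect and clean the pte (or pmd) mapping @page in this vma so that
 * further writes fault and re-dirty the page; bump *cleaned for each entry
 * that was actually dirty or writable.
 */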
884 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
885 unsigned long address, void *arg)
886 {
887 struct page_vma_mapped_walk pvmw = {
888 .page = page,
889 .vma = vma,
890 .address = address,
891 .flags = PVMW_SYNC,
892 };
893 struct mmu_notifier_range range;
894 int *cleaned = arg;
895
896
897
898
899
900 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
901 0, vma, vma->vm_mm, address,
902 min(vma->vm_end, address + page_size(page)));
903 mmu_notifier_invalidate_range_start(&range);
904
905 while (page_vma_mapped_walk(&pvmw)) {
906 int ret = 0;
907
908 address = pvmw.address;
909 if (pvmw.pte) {
910 pte_t entry;
911 pte_t *pte = pvmw.pte;
912
913 if (!pte_dirty(*pte) && !pte_write(*pte))
914 continue;
915
916 flush_cache_page(vma, address, pte_pfn(*pte));
917 entry = ptep_clear_flush(vma, address, pte);
918 entry = pte_wrprotect(entry);
919 entry = pte_mkclean(entry);
920 set_pte_at(vma->vm_mm, address, pte, entry);
921 ret = 1;
922 } else {
923 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
924 pmd_t *pmd = pvmw.pmd;
925 pmd_t entry;
926
927 if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
928 continue;
929
930 flush_cache_page(vma, address, page_to_pfn(page));
931 entry = pmdp_invalidate(vma, address, pmd);
932 entry = pmd_wrprotect(entry);
933 entry = pmd_mkclean(entry);
934 set_pmd_at(vma->vm_mm, address, pmd, entry);
935 ret = 1;
936 #else
937
938 WARN_ON_ONCE(1);
939 #endif
940 }
941
942
943
944
945
946
947
948
949 if (ret)
950 (*cleaned)++;
951 }
952
953 mmu_notifier_invalidate_range_end(&range);
954
955 return true;
956 }
957
958 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
959 {
960 if (vma->vm_flags & VM_SHARED)
961 return false;
962
963 return true;
964 }
965
966 int page_mkclean(struct page *page)
967 {
968 int cleaned = 0;
969 struct address_space *mapping;
970 struct rmap_walk_control rwc = {
971 .arg = (void *)&cleaned,
972 .rmap_one = page_mkclean_one,
973 .invalid_vma = invalid_mkclean_vma,
974 };
975
976 BUG_ON(!PageLocked(page));
977
978 if (!page_mapped(page))
979 return 0;
980
981 mapping = page_mapping(page);
982 if (!mapping)
983 return 0;
984
985 rmap_walk(page, &rwc);
986
987 return cleaned;
988 }
989 EXPORT_SYMBOL_GPL(page_mkclean);
990
/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
1001 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
1002 {
1003 struct anon_vma *anon_vma = vma->anon_vma;
1004
1005 page = compound_head(page);
1006
1007 VM_BUG_ON_PAGE(!PageLocked(page), page);
1008 VM_BUG_ON_VMA(!anon_vma, vma);
1009
1010 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1011
1012
1013
1014
1015
1016 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1017 }
1018
/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page or Hugepage to add to rmap
 * @vma:	VM area to add page to
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
1026 static void __page_set_anon_rmap(struct page *page,
1027 struct vm_area_struct *vma, unsigned long address, int exclusive)
1028 {
1029 struct anon_vma *anon_vma = vma->anon_vma;
1030
1031 BUG_ON(!anon_vma);
1032
1033 if (PageAnon(page))
1034 return;
1035
1036
1037
1038
1039
1040
1041 if (!exclusive)
1042 anon_vma = anon_vma->root;
1043
1044 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1045 page->mapping = (struct address_space *) anon_vma;
1046 page->index = linear_page_index(vma, address);
1047 }
1048
1049
1050
1051
1052
1053
1054
1055 static void __page_check_anon_rmap(struct page *page,
1056 struct vm_area_struct *vma, unsigned long address)
1057 {
1058 #ifdef CONFIG_DEBUG_VM
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
1072 BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
1073 #endif
1074 }
1075
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
1088 void page_add_anon_rmap(struct page *page,
1089 struct vm_area_struct *vma, unsigned long address, bool compound)
1090 {
1091 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
1092 }
1093
/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to prefer page_add_anon_rmap above.
 */
1099 void do_page_add_anon_rmap(struct page *page,
1100 struct vm_area_struct *vma, unsigned long address, int flags)
1101 {
1102 bool compound = flags & RMAP_COMPOUND;
1103 bool first;
1104
1105 if (compound) {
1106 atomic_t *mapcount;
1107 VM_BUG_ON_PAGE(!PageLocked(page), page);
1108 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1109 mapcount = compound_mapcount_ptr(page);
1110 first = atomic_inc_and_test(mapcount);
1111 } else {
1112 first = atomic_inc_and_test(&page->_mapcount);
1113 }
1114
1115 if (first) {
1116 int nr = compound ? hpage_nr_pages(page) : 1;
1117
1118
1119
1120
1121
1122
1123 if (compound)
1124 __inc_node_page_state(page, NR_ANON_THPS);
1125 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
1126 }
1127 if (unlikely(PageKsm(page)))
1128 return;
1129
1130 VM_BUG_ON_PAGE(!PageLocked(page), page);
1131
1132
1133 if (first)
1134 __page_set_anon_rmap(page, vma, address,
1135 flags & RMAP_EXCLUSIVE);
1136 else
1137 __page_check_anon_rmap(page, vma, address);
1138 }
1139
/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * The page does not have to be locked.
 */
1151 void page_add_new_anon_rmap(struct page *page,
1152 struct vm_area_struct *vma, unsigned long address, bool compound)
1153 {
1154 int nr = compound ? hpage_nr_pages(page) : 1;
1155
1156 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1157 __SetPageSwapBacked(page);
1158 if (compound) {
1159 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1160
1161 atomic_set(compound_mapcount_ptr(page), 0);
1162 __inc_node_page_state(page, NR_ANON_THPS);
1163 } else {
1164
1165 VM_BUG_ON_PAGE(PageTransCompound(page), page);
1166
1167 atomic_set(&page->_mapcount, 0);
1168 }
1169 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
1170 __page_set_anon_rmap(page, vma, address, 1);
1171 }
1172
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page:	the page to add the mapping to
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
1180 void page_add_file_rmap(struct page *page, bool compound)
1181 {
1182 int i, nr = 1;
1183
1184 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
1185 lock_page_memcg(page);
1186 if (compound && PageTransHuge(page)) {
1187 for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
1188 if (atomic_inc_and_test(&page[i]._mapcount))
1189 nr++;
1190 }
1191 if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
1192 goto out;
1193 if (PageSwapBacked(page))
1194 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
1195 else
1196 __inc_node_page_state(page, NR_FILE_PMDMAPPED);
1197 } else {
1198 if (PageTransCompound(page) && page_mapping(page)) {
1199 VM_WARN_ON_ONCE(!PageLocked(page));
1200
1201 SetPageDoubleMap(compound_head(page));
1202 if (PageMlocked(page))
1203 clear_page_mlock(compound_head(page));
1204 }
1205 if (!atomic_inc_and_test(&page->_mapcount))
1206 goto out;
1207 }
1208 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
1209 out:
1210 unlock_page_memcg(page);
1211 }
1212
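/*
 * Drop a file-backed (or shmem) rmap reference: decrement the relevant
 * mapcount(s) and, once the page becomes unmapped, the NR_FILE_MAPPED and
 * PMD-mapped counters.
 */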
1213 static void page_remove_file_rmap(struct page *page, bool compound)
1214 {
1215 int i, nr = 1;
1216
1217 VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1218 lock_page_memcg(page);
1219
1220
1221 if (unlikely(PageHuge(page))) {
1222
1223 atomic_dec(compound_mapcount_ptr(page));
1224 goto out;
1225 }
1226
1227
1228 if (compound && PageTransHuge(page)) {
1229 for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
1230 if (atomic_add_negative(-1, &page[i]._mapcount))
1231 nr++;
1232 }
1233 if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1234 goto out;
1235 if (PageSwapBacked(page))
1236 __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
1237 else
1238 __dec_node_page_state(page, NR_FILE_PMDMAPPED);
1239 } else {
1240 if (!atomic_add_negative(-1, &page->_mapcount))
1241 goto out;
1242 }
1243
1244
1245
1246
1247
1248
1249 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
1250
1251 if (unlikely(PageMlocked(page)))
1252 clear_page_mlock(page);
1253 out:
1254 unlock_page_memcg(page);
1255 }
1256
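/*
 * Tear down the compound (PMD-level) mapping of an anonymous THP. If the
 * page is also mapped by ptes (PageDoubleMap), only the subpages whose
 * mapcount drops to zero are subtracted from NR_ANON_MAPPED; the page is
 * then queued for deferred splitting.
 */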
1257 static void page_remove_anon_compound_rmap(struct page *page)
1258 {
1259 int i, nr;
1260
1261 if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1262 return;
1263
1264
1265 if (unlikely(PageHuge(page)))
1266 return;
1267
1268 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1269 return;
1270
1271 __dec_node_page_state(page, NR_ANON_THPS);
1272
1273 if (TestClearPageDoubleMap(page)) {
1274
1275
1276
1277
1278 for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
1279 if (atomic_add_negative(-1, &page[i]._mapcount))
1280 nr++;
1281 }
1282 } else {
1283 nr = HPAGE_PMD_NR;
1284 }
1285
1286 if (unlikely(PageMlocked(page)))
1287 clear_page_mlock(page);
1288
1289 if (nr) {
1290 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
1291 deferred_split_huge_page(page);
1292 }
1293 }
1294
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page:	page to remove mapping from
 * @compound:	uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
1302 void page_remove_rmap(struct page *page, bool compound)
1303 {
1304 if (!PageAnon(page))
1305 return page_remove_file_rmap(page, compound);
1306
1307 if (compound)
1308 return page_remove_anon_compound_rmap(page);
1309
1310
1311 if (!atomic_add_negative(-1, &page->_mapcount))
1312 return;
1313
1314
1315
1316
1317
1318
1319 __dec_node_page_state(page, NR_ANON_MAPPED);
1320
1321 if (unlikely(PageMlocked(page)))
1322 clear_page_mlock(page);
1323
1324 if (PageTransCompound(page))
1325 deferred_split_huge_page(compound_head(page));
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336 }
1337
/*
 * @arg: enum ttu_flags will be passed to this argument
 */
1341 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1342 unsigned long address, void *arg)
1343 {
1344 struct mm_struct *mm = vma->vm_mm;
1345 struct page_vma_mapped_walk pvmw = {
1346 .page = page,
1347 .vma = vma,
1348 .address = address,
1349 };
1350 pte_t pteval;
1351 struct page *subpage;
1352 bool ret = true;
1353 struct mmu_notifier_range range;
1354 enum ttu_flags flags = (enum ttu_flags)arg;
1355
1356
1357 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
1358 return true;
1359
1360 if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
1361 is_zone_device_page(page) && !is_device_private_page(page))
1362 return true;
1363
1364 if (flags & TTU_SPLIT_HUGE_PMD) {
1365 split_huge_pmd_address(vma, address,
1366 flags & TTU_SPLIT_FREEZE, page);
1367 }
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1378 address,
1379 min(vma->vm_end, address + page_size(page)));
1380 if (PageHuge(page)) {
1381
1382
1383
1384
1385 adjust_range_if_pmd_sharing_possible(vma, &range.start,
1386 &range.end);
1387 }
1388 mmu_notifier_invalidate_range_start(&range);
1389
1390 while (page_vma_mapped_walk(&pvmw)) {
1391 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1392
1393 if (!pvmw.pte && (flags & TTU_MIGRATION)) {
1394 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
1395
1396 set_pmd_migration_entry(&pvmw, page);
1397 continue;
1398 }
1399 #endif
1400
1401
1402
1403
1404
1405
1406 if (!(flags & TTU_IGNORE_MLOCK)) {
1407 if (vma->vm_flags & VM_LOCKED) {
1408
1409 if (!PageTransCompound(page)) {
1410
1411
1412
1413
1414 mlock_vma_page(page);
1415 }
1416 ret = false;
1417 page_vma_mapped_walk_done(&pvmw);
1418 break;
1419 }
1420 if (flags & TTU_MUNLOCK)
1421 continue;
1422 }
1423
1424
1425 VM_BUG_ON_PAGE(!pvmw.pte, page);
1426
1427 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1428 address = pvmw.address;
1429
1430 if (PageHuge(page)) {
1431 if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
1432
1433
1434
1435
1436
1437
1438
1439 flush_cache_range(vma, range.start, range.end);
1440 flush_tlb_range(vma, range.start, range.end);
1441 mmu_notifier_invalidate_range(mm, range.start,
1442 range.end);
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453 page_vma_mapped_walk_done(&pvmw);
1454 break;
1455 }
1456 }
1457
1458 if (IS_ENABLED(CONFIG_MIGRATION) &&
1459 (flags & TTU_MIGRATION) &&
1460 is_zone_device_page(page)) {
1461 swp_entry_t entry;
1462 pte_t swp_pte;
1463
1464 pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);
1465
1466
1467
1468
1469
1470
1471 entry = make_migration_entry(page, 0);
1472 swp_pte = swp_entry_to_pte(entry);
1473 if (pte_soft_dirty(pteval))
1474 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1475 set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487 subpage = page;
1488 goto discard;
1489 }
1490
1491 if (!(flags & TTU_IGNORE_ACCESS)) {
1492 if (ptep_clear_flush_young_notify(vma, address,
1493 pvmw.pte)) {
1494 ret = false;
1495 page_vma_mapped_walk_done(&pvmw);
1496 break;
1497 }
1498 }
1499
1500
1501 flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1502 if (should_defer_flush(mm, flags)) {
1503
1504
1505
1506
1507
1508
1509
1510
1511 pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1512
1513 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
1514 } else {
1515 pteval = ptep_clear_flush(vma, address, pvmw.pte);
1516 }
1517
1518
1519 if (pte_dirty(pteval))
1520 set_page_dirty(page);
1521
1522
1523 update_hiwater_rss(mm);
1524
1525 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1526 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1527 if (PageHuge(page)) {
1528 hugetlb_count_sub(compound_nr(page), mm);
1529 set_huge_swap_pte_at(mm, address,
1530 pvmw.pte, pteval,
1531 vma_mmu_pagesize(vma));
1532 } else {
1533 dec_mm_counter(mm, mm_counter(page));
1534 set_pte_at(mm, address, pvmw.pte, pteval);
1535 }
1536
1537 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548 dec_mm_counter(mm, mm_counter(page));
1549
1550 mmu_notifier_invalidate_range(mm, address,
1551 address + PAGE_SIZE);
1552 } else if (IS_ENABLED(CONFIG_MIGRATION) &&
1553 (flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
1554 swp_entry_t entry;
1555 pte_t swp_pte;
1556
1557 if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1558 set_pte_at(mm, address, pvmw.pte, pteval);
1559 ret = false;
1560 page_vma_mapped_walk_done(&pvmw);
1561 break;
1562 }
1563
1564
1565
1566
1567
1568
1569 entry = make_migration_entry(subpage,
1570 pte_write(pteval));
1571 swp_pte = swp_entry_to_pte(entry);
1572 if (pte_soft_dirty(pteval))
1573 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1574 set_pte_at(mm, address, pvmw.pte, swp_pte);
1575
1576
1577
1578
1579 } else if (PageAnon(page)) {
1580 swp_entry_t entry = { .val = page_private(subpage) };
1581 pte_t swp_pte;
1582
1583
1584
1585
1586 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
1587 WARN_ON_ONCE(1);
1588 ret = false;
1589
1590 mmu_notifier_invalidate_range(mm, address,
1591 address + PAGE_SIZE);
1592 page_vma_mapped_walk_done(&pvmw);
1593 break;
1594 }
1595
1596
1597 if (!PageSwapBacked(page)) {
1598 if (!PageDirty(page)) {
1599
1600 mmu_notifier_invalidate_range(mm,
1601 address, address + PAGE_SIZE);
1602 dec_mm_counter(mm, MM_ANONPAGES);
1603 goto discard;
1604 }
1605
1606
1607
1608
1609
1610 set_pte_at(mm, address, pvmw.pte, pteval);
1611 SetPageSwapBacked(page);
1612 ret = false;
1613 page_vma_mapped_walk_done(&pvmw);
1614 break;
1615 }
1616
1617 if (swap_duplicate(entry) < 0) {
1618 set_pte_at(mm, address, pvmw.pte, pteval);
1619 ret = false;
1620 page_vma_mapped_walk_done(&pvmw);
1621 break;
1622 }
1623 if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1624 set_pte_at(mm, address, pvmw.pte, pteval);
1625 ret = false;
1626 page_vma_mapped_walk_done(&pvmw);
1627 break;
1628 }
1629 if (list_empty(&mm->mmlist)) {
1630 spin_lock(&mmlist_lock);
1631 if (list_empty(&mm->mmlist))
1632 list_add(&mm->mmlist, &init_mm.mmlist);
1633 spin_unlock(&mmlist_lock);
1634 }
1635 dec_mm_counter(mm, MM_ANONPAGES);
1636 inc_mm_counter(mm, MM_SWAPENTS);
1637 swp_pte = swp_entry_to_pte(entry);
1638 if (pte_soft_dirty(pteval))
1639 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1640 set_pte_at(mm, address, pvmw.pte, swp_pte);
1641
1642 mmu_notifier_invalidate_range(mm, address,
1643 address + PAGE_SIZE);
1644 } else {
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655 dec_mm_counter(mm, mm_counter_file(page));
1656 }
1657 discard:
1658
1659
1660
1661
1662
1663
1664
1665 page_remove_rmap(subpage, PageHuge(page));
1666 put_page(page);
1667 }
1668
1669 mmu_notifier_invalidate_range_end(&range);
1670
1671 return ret;
1672 }
1673
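/*
 * During exec the stack is built in a temporary vma and later moved; such a
 * vma is marked with VM_STACK_INCOMPLETE_SETUP and is skipped by migration
 * (see invalid_migration_vma() below).
 */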
1674 bool is_vma_temporary_stack(struct vm_area_struct *vma)
1675 {
1676 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1677
1678 if (!maybe_stack)
1679 return false;
1680
1681 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1682 VM_STACK_INCOMPLETE_SETUP)
1683 return true;
1684
1685 return false;
1686 }
1687
1688 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1689 {
1690 return is_vma_temporary_stack(vma);
1691 }
1692
1693 static int page_mapcount_is_zero(struct page *page)
1694 {
1695 return !total_mapcount(page);
1696 }
1697
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page; used in the pageout path.  The caller must hold the page lock.
 *
 * If unmap is successful, return true. Otherwise, false.
 */
1708 bool try_to_unmap(struct page *page, enum ttu_flags flags)
1709 {
1710 struct rmap_walk_control rwc = {
1711 .rmap_one = try_to_unmap_one,
1712 .arg = (void *)flags,
1713 .done = page_mapcount_is_zero,
1714 .anon_lock = page_lock_anon_vma_read,
1715 };
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725 if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
1726 && !PageKsm(page) && PageAnon(page))
1727 rwc.invalid_vma = invalid_migration_vma;
1728
1729 if (flags & TTU_RMAP_LOCKED)
1730 rmap_walk_locked(page, &rwc);
1731 else
1732 rmap_walk(page, &rwc);
1733
1734 return !page_mapcount(page) ? true : false;
1735 }
1736
1737 static int page_not_mapped(struct page *page)
1738 {
1739 return !page_mapped(page);
1740 };
1741
/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 */
1751 void try_to_munlock(struct page *page)
1752 {
1753 struct rmap_walk_control rwc = {
1754 .rmap_one = try_to_unmap_one,
1755 .arg = (void *)TTU_MUNLOCK,
1756 .done = page_not_mapped,
1757 .anon_lock = page_lock_anon_vma_read,
1758
1759 };
1760
1761 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
1762 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
1763
1764 rmap_walk(page, &rwc);
1765 }
1766
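/*
 * Called when the last reference to an anon_vma is dropped: free it and, if
 * it was not its own root, drop the reference it held on the root as well.
 */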
1767 void __put_anon_vma(struct anon_vma *anon_vma)
1768 {
1769 struct anon_vma *root = anon_vma->root;
1770
1771 anon_vma_free(anon_vma);
1772 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1773 anon_vma_free(root);
1774 }
1775
1776 static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1777 struct rmap_walk_control *rwc)
1778 {
1779 struct anon_vma *anon_vma;
1780
1781 if (rwc->anon_lock)
1782 return rwc->anon_lock(page);
1783
1784
1785
1786
1787
1788
1789
1790 anon_vma = page_anon_vma(page);
1791 if (!anon_vma)
1792 return NULL;
1793
1794 anon_vma_lock_read(anon_vma);
1795 return anon_vma;
1796 }
1797
/*
 * rmap_walk_anon - do something to an anonymous page using the anon_vma
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
1812 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
1813 bool locked)
1814 {
1815 struct anon_vma *anon_vma;
1816 pgoff_t pgoff_start, pgoff_end;
1817 struct anon_vma_chain *avc;
1818
1819 if (locked) {
1820 anon_vma = page_anon_vma(page);
1821
1822 VM_BUG_ON_PAGE(!anon_vma, page);
1823 } else {
1824 anon_vma = rmap_walk_anon_lock(page, rwc);
1825 }
1826 if (!anon_vma)
1827 return;
1828
1829 pgoff_start = page_to_pgoff(page);
1830 pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
1831 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
1832 pgoff_start, pgoff_end) {
1833 struct vm_area_struct *vma = avc->vma;
1834 unsigned long address = vma_address(page, vma);
1835
1836 cond_resched();
1837
1838 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1839 continue;
1840
1841 if (!rwc->rmap_one(page, vma, address, rwc->arg))
1842 break;
1843 if (rwc->done && rwc->done(page))
1844 break;
1845 }
1846
1847 if (!locked)
1848 anon_vma_unlock_read(anon_vma);
1849 }
1850
/*
 * rmap_walk_file - do something to a file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
1864 static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
1865 bool locked)
1866 {
1867 struct address_space *mapping = page_mapping(page);
1868 pgoff_t pgoff_start, pgoff_end;
1869 struct vm_area_struct *vma;
1870
1871
1872
1873
1874
1875
1876
1877 VM_BUG_ON_PAGE(!PageLocked(page), page);
1878
1879 if (!mapping)
1880 return;
1881
1882 pgoff_start = page_to_pgoff(page);
1883 pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
1884 if (!locked)
1885 i_mmap_lock_read(mapping);
1886 vma_interval_tree_foreach(vma, &mapping->i_mmap,
1887 pgoff_start, pgoff_end) {
1888 unsigned long address = vma_address(page, vma);
1889
1890 cond_resched();
1891
1892 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1893 continue;
1894
1895 if (!rwc->rmap_one(page, vma, address, rwc->arg))
1896 goto done;
1897 if (rwc->done && rwc->done(page))
1898 goto done;
1899 }
1900
1901 done:
1902 if (!locked)
1903 i_mmap_unlock_read(mapping);
1904 }
1905
1906 void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
1907 {
1908 if (unlikely(PageKsm(page)))
1909 rmap_walk_ksm(page, rwc);
1910 else if (PageAnon(page))
1911 rmap_walk_anon(page, rwc, false);
1912 else
1913 rmap_walk_file(page, rwc, false);
1914 }
1915
/* Like rmap_walk, but the caller holds the relevant rmap lock */
1917 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
1918 {
1919
1920 VM_BUG_ON_PAGE(PageKsm(page), page);
1921 if (PageAnon(page))
1922 rmap_walk_anon(page, rwc, true);
1923 else
1924 rmap_walk_file(page, rwc, true);
1925 }
1926
1927 #ifdef CONFIG_HUGETLB_PAGE
/*
 * The following two functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
1933 void hugepage_add_anon_rmap(struct page *page,
1934 struct vm_area_struct *vma, unsigned long address)
1935 {
1936 struct anon_vma *anon_vma = vma->anon_vma;
1937 int first;
1938
1939 BUG_ON(!PageLocked(page));
1940 BUG_ON(!anon_vma);
1941
1942 first = atomic_inc_and_test(compound_mapcount_ptr(page));
1943 if (first)
1944 __page_set_anon_rmap(page, vma, address, 0);
1945 }
1946
1947 void hugepage_add_new_anon_rmap(struct page *page,
1948 struct vm_area_struct *vma, unsigned long address)
1949 {
1950 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1951 atomic_set(compound_mapcount_ptr(page), 0);
1952 __page_set_anon_rmap(page, vma, address, 1);
1953 }
1954 #endif