This source file includes the following definitions:
- can_do_mlock
- clear_page_mlock
- mlock_vma_page
- __munlock_isolate_lru_page
- __munlock_isolated_page
- __munlock_isolation_failed
- munlock_vma_page
- __mlock_posix_error_return
- __putback_lru_fast_prepare
- __putback_lru_fast
- __munlock_pagevec
- __munlock_pagevec_fill
- munlock_vma_pages_range
- mlock_fixup
- apply_vma_lock_flags
- count_mm_mlocked_page_nr
- do_mlock
- SYSCALL_DEFINE2 (mlock)
- SYSCALL_DEFINE3 (mlock2)
- SYSCALL_DEFINE2 (munlock)
- apply_mlockall_flags
- SYSCALL_DEFINE1 (mlockall)
- SYSCALL_DEFINE0 (munlockall)
- user_shm_lock
- user_shm_unlock
// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

#include "internal.h"
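
/*
 * Return true if the calling task is allowed to lock memory: either
 * RLIMIT_MEMLOCK is non-zero or the task has CAP_IPC_LOCK.
 */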
bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);
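
/*
 * Mlocked pages are marked PageMlocked and kept on the "unevictable"
 * LRU list rather than the [in]active lists. The helpers below keep
 * the NR_MLOCK zone counter and the UNEVICTABLE_* vm events in step
 * as pages enter and leave the mlocked state.
 */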
void clear_page_mlock(struct page *page)
{
	if (!TestClearPageMlocked(page))
		return;

	mod_zone_page_state(page_zone(page), NR_MLOCK,
			    -hpage_nr_pages(page));
	count_vm_event(UNEVICTABLE_PGCLEARED);
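	/*
	 * Isolate the page from the LRU so it can be rescued onto the
	 * evictable list; if isolation fails, some other task already
	 * holds the page off the LRU.
	 */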
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}
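
/*
 * Mark the page as mlocked, update the NR_MLOCK counter, and move it
 * off the evictable LRU lists. The page must be locked by the caller.
 */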
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}
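
/*
 * Isolate a page from the LRU, optionally taking an extra reference.
 * Expects the relevant lru_lock to be held by the caller. Returns
 * true on success, false if the page was not on the LRU.
 */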
static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
{
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
		if (getpage)
			get_page(page);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		return true;
	}

	return false;
}
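
/*
 * Finish munlock after a successful LRU isolation. If the page is
 * mapped by more than one VMA, try_to_munlock() re-mlocks it when
 * some other mapping is still VM_LOCKED; if the page ends up clear of
 * PageMlocked, count it as munlocked. Either way, put the page back
 * on an LRU list.
 */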
static void __munlock_isolated_page(struct page *page)
{
	if (page_mapcount(page) > 1)
		try_to_munlock(page);

	if (!PageMlocked(page))
		count_vm_event(UNEVICTABLE_PGMUNLOCKED);

	putback_lru_page(page);
}
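
/*
 * Accounting for a failed munlock isolation: some other task has
 * already removed the page from the LRU and will put it back on the
 * right list. A page still on the unevictable list is counted as
 * stranded.
 */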
static void __munlock_isolation_failed(struct page *page)
{
	if (PageUnevictable(page))
		__count_vm_event(UNEVICTABLE_PGSTRANDED);
	else
		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
}
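
/*
 * munlock_vma_page - munlock a VMA page
 *
 * The page must be locked by the caller. Returns the number of base
 * pages beyond the first that were munlocked (non-zero only for a THP
 * head page), so callers can skip the rest of the huge page.
 */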
unsigned int munlock_vma_page(struct page *page)
{
	int nr_pages;
	pg_data_t *pgdat = page_pgdat(page);

	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);

	spin_lock_irq(&pgdat->lru_lock);

	if (!TestClearPageMlocked(page)) {
		nr_pages = 1;
		goto unlock_out;
	}

	nr_pages = hpage_nr_pages(page);
	__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);

	if (__munlock_isolate_lru_page(page, true)) {
		spin_unlock_irq(&pgdat->lru_lock);
		__munlock_isolated_page(page);
		goto out;
	}
	__munlock_isolation_failed(page);

unlock_out:
	spin_unlock_irq(&pgdat->lru_lock);

out:
	return nr_pages - 1;
}
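
/*
 * Convert the fault errors seen while populating an mlocked range into
 * the errno values POSIX specifies for mlock(): -EFAULT becomes
 * -ENOMEM (unmapped addresses) and -ENOMEM becomes -EAGAIN (memory
 * could not be locked).
 */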
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
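
/*
 * Prepare a page for fast putback to the LRU: if the page is unlikely
 * to be re-mlocked (mapped at most once) and is evictable, add it to
 * the pagevec, clear its unevictable state, and unlock it. Returns
 * false if the slow path (__munlock_isolated_page) is required.
 */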
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
		int *pgrescued)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}
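
/*
 * Put the isolated, munlocked pages back on the LRU in one batch and
 * update the munlocked/rescued event counters.
 */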
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));

	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}
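
/*
 * Munlock a batch of pages from the same zone. Phase 1, under the
 * zone's lru_lock: clear PageMlocked, adjust NR_MLOCK, and try to
 * isolate each page from the LRU; pages that cannot be isolated just
 * have their pin dropped. Phase 2, without the lock: lock each
 * isolated page and either queue it for fast putback or take the
 * __munlock_isolated_page() slow path.
 */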
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked = -nr;
	struct pagevec pvec_putback;
	int pgrescued = 0;

	pagevec_init(&pvec_putback);

	spin_lock_irq(&zone->zone_pgdat->lru_lock);
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			if (__munlock_isolate_lru_page(page, false))
				continue;
			else
				__munlock_isolation_failed(page);
		} else {
			delta_munlocked++;
		}

		pagevec_add(&pvec_putback, pvec->pages[i]);
		pvec->pages[i] = NULL;
	}
	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	spin_unlock_irq(&zone->zone_pgdat->lru_lock);

	pagevec_release(&pvec_putback);

	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				get_page(page);
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page);
			}
		}
	}

	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}
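
/*
 * Fill the rest of the pagevec with pages mapped by PTEs following
 * @start within the same PMD, stopping at the first page from a
 * different zone, a THP, or an unmapped address. Returns the address
 * of the first page that was not queued, where the caller should
 * resume.
 */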
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
		struct vm_area_struct *vma, struct zone *zone,
		unsigned long start, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(vma->vm_mm, start, &ptl);

	end = pgd_addr_end(start, end);
	end = p4d_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;
		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);

		if (!page || page_zone(page) != zone)
			break;

		if (PageTransCompound(page))
			break;

		get_page(page);

		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}
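
/*
 * munlock_vma_pages_range - munlock all pages in the vma range
 *
 * Clears VM_LOCKED and VM_LOCKONFAULT from vm_flags, then walks
 * [start, end): tail pages of a THP are skipped, a THP head is
 * munlocked whole via munlock_vma_page(), and base pages are batched
 * into a pagevec for __munlock_pagevec().
 */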
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

	while (start < end) {
		struct page *page;
		unsigned int page_mask = 0;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;

		pagevec_init(&pvec);

		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);

		if (page && !IS_ERR(page)) {
			if (PageTransTail(page)) {
				VM_BUG_ON_PAGE(PageMlocked(page), page);
				put_page(page);
			} else if (PageTransHuge(page)) {
				lock_page(page);
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page);
			} else {
				pagevec_add(&pvec, page);
				zone = page_zone(page);

				start = __munlock_pagevec_fill(&pvec, vma,
						zone, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}
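
/*
 * mlock_fixup - apply new VM_LOCKED* flags to a vma range
 *
 * Merge or split @vma so that [start, end) carries exactly @newflags,
 * adjust mm->locked_vm, and munlock the range when it is being
 * unlocked. Mappings that can never be mlocked (special mappings,
 * hugetlb, the gate vma, DAX) are silently skipped.
 */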
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);
	vm_flags_t old_flags = vma->vm_flags;

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma))
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	else if (old_flags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}
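
/*
 * Apply @flags to every vma overlapping [start, start + len),
 * splitting vmas at the range boundaries via mlock_fixup(). A zero
 * @flags munlocks the range.
 */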
static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
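
/*
 * Count the pages in [start, start + len) that are already mlocked so
 * that re-locking an overlapping range is not charged against
 * RLIMIT_MEMLOCK twice.
 */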
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;

	if (mm == NULL)
		mm = current->mm;

	vma = find_vma(mm, start);
	if (vma == NULL)
		vma = mm->mmap;

	for (; vma ; vma = vma->vm_next) {
		if (start >= vma->vm_end)
			continue;
		if (start + len <= vma->vm_start)
			break;
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (start + len < vma->vm_end) {
				count += start + len - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
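		/*
		 * Part of the range may already be mlocked and accounted in
		 * locked_vm; do not charge those pages a second time when
		 * checking against the limit.
		 */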
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	up_write(&current->mm->mmap_sem);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	up_write(&current->mm->mmap_sem);

	return ret;
}
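
/*
 * Apply the MCL_* flags to the whole address space: MCL_FUTURE is
 * recorded in mm->def_flags for mappings created later, MCL_CURRENT
 * walks every existing vma. Errors from mlock_fixup() are ignored;
 * the actual population happens in the caller.
 */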
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
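
/*
 * Locked shared memory segments (SHM_LOCK) are charged against the
 * owning user's RLIMIT_MEMLOCK in user->locked_shm, serialized by
 * shmlock_user_lock.
 */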
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}