This source file includes the following definitions:
- kobjsize
- follow_pfn
- vfree
- __vmalloc
- __vmalloc_node_flags
- vmalloc_user
- vmalloc_to_page
- vmalloc_to_pfn
- vread
- vwrite
- vmalloc
- vzalloc
- vmalloc_node
- vzalloc_node
- vmalloc_exec
- vmalloc_32
- vmalloc_32_user
- vmap
- vunmap
- vm_map_ram
- vm_unmap_ram
- vm_unmap_aliases
- vmalloc_sync_mappings
- vmalloc_sync_unmappings
- alloc_vm_area
- free_vm_area
- vm_insert_page
- vm_map_pages
- vm_map_pages_zero
- SYSCALL_DEFINE1
- mmap_init
- validate_nommu_regions
- validate_nommu_regions
- add_nommu_region
- delete_nommu_region
- free_page_series
- __put_nommu_region
- put_nommu_region
- add_vma_to_mm
- delete_vma_from_mm
- delete_vma
- find_vma
- find_extend_vma
- expand_stack
- find_vma_exact
- validate_mmap_request
- determine_vm_flags
- do_mmap_shared_file
- do_mmap_private
- do_mmap
- ksys_mmap_pgoff
- SYSCALL_DEFINE6
- SYSCALL_DEFINE1
- split_vma
- shrink_vma
- do_munmap
- vm_munmap
- SYSCALL_DEFINE2
- exit_mmap
- vm_brk
- do_mremap
- SYSCALL_DEFINE5
- follow_page
- remap_pfn_range
- vm_iomap_memory
- remap_vmalloc_range
- arch_get_unmapped_area
- filemap_fault
- filemap_map_pages
- __access_remote_vm
- access_remote_vm
- access_process_vm
- nommu_shrink_inode_mappings
- init_user_reserve
- init_admin_reserve
/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/admin-guide/mm/nommu-mmap.rst
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/printk.h>

#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

void *high_memory;
EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);
unsigned long highest_memmap_pfn;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return page_size(page);
}
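
/*
 * Illustrative sketch (not part of this file): kobjsize() can be probed
 * from any kernel context; for a slab object it degenerates to ksize():
 *
 *	void *p = kmalloc(100, GFP_KERNEL);
 *	if (p)
 *		pr_info("kobjsize=%u\n", kobjsize(p));	// >= 100, via ksize()
 *	kfree(p);
 */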

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);

LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags)
{
	return __vmalloc(size, flags, PAGE_KERNEL);
}

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
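
/*
 * Illustrative note (not from this file): since __vmalloc() above falls
 * back to kmalloc(), a !MMU vmalloc() allocation is physically contiguous
 * and vmalloc_to_pfn() is a plain virt-to-pfn conversion, e.g.:
 *
 *	void *buf = vmalloc(2 * PAGE_SIZE);
 *	if (buf) {
 *		unsigned long pfn = vmalloc_to_pfn(buf);
 *		vfree(buf);	// ends up in kfree()
 *	}
 */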

/*
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but currently this is just fine.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_[un]mappings() if the architecture
 * chose not to have one.
 */
void __weak vmalloc_sync_mappings(void)
{
}

void __weak vmalloc_sync_unmappings(void)
{
}

struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
			unsigned long num)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_map_pages);

int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
				unsigned long num)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_map_pages_zero);

/*
 * sys_brk() on !MMU: the brk can only be moved around within the space
 * reserved between mm->start_brk and the arch-specific end_brk limit set
 * up at execve() time; it is never grown by mapping in new memory.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}
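
/*
 * Illustrative userspace view (a sketch, not from this file): growing the
 * heap past the end_brk limit fails instead of mapping in new memory, so
 * the C library's sbrk() reports failure and allocators fall back to mmap:
 *
 *	if (sbrk(65536) == (void *)-1)
 *		buf = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 */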

/*
 * initialise the percpu counter for VM and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(last->vm_end <= last->vm_start);
	BUG_ON(last->vm_top < last->vm_end);

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(region->vm_end <= region->vm_start);
		BUG_ON(region->vm_top < region->vm_end);
		BUG_ON(region->vm_start < last->vm_top);

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		atomic_long_dec(&mmap_pages_allocated);
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this
 *   releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY)
			free_page_series(region->vm_start, region->vm_top);
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree, and add to the address space's page tree also if not an
 * anonymous page
 * - should be called with mm->mmap_sem held for writing
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache.vmas[i] == vma) {
			vmacache_invalidate(mm);
			break;
		}
	}

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	vm_area_free(vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);
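
/*
 * Sketch of a typical caller (illustrative, assuming mm->mmap_sem is held
 * for reading):
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma)
 *		... addr lies inside [vma->vm_start, vma->vm_end) ...
 *	up_read(&mm->mmap_sem);
 *
 * Note that unlike the MMU version, this find_vma() never returns a VMA
 * that merely ends above addr without containing it: the list walk above
 * bails out with NULL as soon as vma->vm_start > addr.
 */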

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = vmacache_find_exact(mm, addr, end);
	if (vma)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED)
		return -EINVAL;

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* files must support mmap */
		if (!file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		if (file->f_op->mmap_capabilities) {
			capabilities = file->f_op->mmap_capabilities(file);
		} else {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = NOMMU_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					NOMMU_MAP_DIRECT |
					NOMMU_MAP_READ |
					NOMMU_MAP_WRITE;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~NOMMU_MAP_DIRECT;
		if (!(file->f_mode & FMODE_CAN_READ))
			capabilities &= ~NOMMU_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks on constraints imposed by the file open
			 * mode */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & NOMMU_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~NOMMU_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & NOMMU_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~NOMMU_MAP_DIRECT;
		}

		if (capabilities & NOMMU_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
			    ) {
				capabilities &= ~NOMMU_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					pr_warn("MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (path_noexec(&file->f_path)) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & NOMMU_MAP_EXEC)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & NOMMU_MAP_EXEC)
			 ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~NOMMU_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = NOMMU_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);

	if (!(capabilities & NOMMU_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = call_mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error
	 * as it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	unsigned long total, point;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & NOMMU_MAP_DIRECT) {
		ret = call_mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	total = 1 << order;
	point = len >> PAGE_SHIFT;

	/* we don't want to allocate a power-of-2 sized page set */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
		total = point;

	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
	if (!base)
		goto enomem;

	atomic_long_add(total, &mmap_pages_allocated);

	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		ret = kernel_read(vma->vm_file, base, len, &fpos);
		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	} else {
		vma_set_anonymous(vma);
	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	pr_err("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0, NULL);
	return -ENOMEM;
}

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap(struct file *file,
			unsigned long addr,
			unsigned long len,
			unsigned long prot,
			unsigned long flags,
			vm_flags_t vm_flags,
			unsigned long pgoff,
			unsigned long *populate,
			struct list_head *uf)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, result;
	int ret;

	*populate = 0;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0)
		return ret;

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = vm_area_alloc(current->mm);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping regions; attempt to
			 * share */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & NOMMU_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY)
				vma->vm_flags |= VM_MAPPED_COPY;
			else {
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & NOMMU_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & NOMMU_MAP_COPY))
					goto error_just_free;

				capabilities &= ~NOMMU_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file &&
	    (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
	     !(flags & MAP_UNINITIALIZED)))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	vm_area_free(vma);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	pr_warn("Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
		len, current->pid);
	show_free_areas(0, NULL);
	return -ENOMEM;

error_getting_region:
	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
		len, current->pid);
	show_free_areas(0, NULL);
	return -ENOMEM;
}
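
/*
 * Illustrative userspace sketch (not part of this file): on !MMU the
 * address hint is ignored (addr is forced to 0 above) and MAP_FIXED is
 * rejected outright by validate_mmap_request(), so a portable caller
 * passes NULL:
 *
 *	void *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	// on success, p is served out of do_mmap_private() above
 */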

unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

	if (file)
		fput(file);
out:
	return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
#endif

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = vm_area_dup(vma);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a
 *   single VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
					current->pid, current->comm,
					start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start)
				return -EINVAL;
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end)
			return -EINVAL;
		if (offset_in_page(start))
			return -EINVAL;
		if (end != vma->vm_end && offset_in_page(end))
			return -EINVAL;
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0)
				return ret;
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	return 0;
}
EXPORT_SYMBOL(do_munmap);

int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len, NULL);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}
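
/*
 * Illustrative note (not from this file): per do_munmap() above, a !MMU
 * munmap() must name either a whole mapping or, for anonymous mappings
 * only, a page-aligned sub-chunk of a single VMA; there is no implicit
 * splitting across several VMAs:
 *
 *	munmap(p, 8192);	// OK if p is a whole 8192-byte mapping
 *	munmap(p + 4096, 4096);	// OK only for an anonymous mapping
 */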

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}
}

int vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and
 * the parameters match exactly
 */
static unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (offset_in_page(addr))
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
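
/*
 * Illustrative sketch (not from this file): a !MMU private mapping is never
 * moved; it can only be resized in place, and growth is bounded by the space
 * the backing region already owns (vm_region->vm_end - vm_region->vm_start):
 *
 *	void *q = mremap(p, 8192, 4096, 0);	// shrink in place: OK
 *	void *r = mremap(p, 4096, 1 << 20, MREMAP_MAYMOVE);
 *	// fails with -ENOMEM unless the backing region is already that big
 */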

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
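
/*
 * Sketch (illustrative): remap_vmalloc_range() above only succeeds on VMAs
 * whose backing buffer came from vmalloc_user()/vmalloc_32_user(), since
 * those are what set VM_USERMAP. A driver mmap handler might do (my_mmap
 * and my_buf are hypothetical names):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, my_buf, 0);
 *	}
 */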

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

vm_fault_t filemap_fault(struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	BUG();
}
EXPORT_SYMBOL(filemap_map_pages);

int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
	struct vm_area_struct *vma;
	int write = gup_flags & FOLL_WRITE;

	if (down_read_killable(&mm->mmap_sem))
		return 0;

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					 (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @gup_flags:	flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
		unsigned int gup_flags)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);

	mmput(mm);
	return len;
}
EXPORT_SYMBOL_GPL(access_process_vm);
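
/*
 * Sketch of a typical consumer (illustrative): ptrace-style peeks at a
 * child's memory funnel through access_process_vm(), e.g.
 *
 *	u8 byte;
 *	if (access_process_vm(child, addr, &byte, 1, FOLL_FORCE) != 1)
 *		return -EIO;
 */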

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken, and then shrink the
 * vm_regions that extend beyond so that do_mmap() doesn't automatically
 * grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	i_mmap_lock_read(inode->i_mapping);

	/* search for VMAs that fall within the dead zone */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			i_mmap_unlock_read(inode->i_mapping);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	i_mmap_unlock_read(inode->i_mapping);
	up_write(&nommu_region_sem);
	return 0;
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int __meminit init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
subsys_initcall(init_user_reserve);

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS mode. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int __meminit init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
subsys_initcall(init_admin_reserve);