Lines matching refs: mm (cross-reference hits for the identifier mm; each entry gives the source line number, the matching code fragment, the enclosing function, and whether mm is bound there as an argument or a local)

61 static void unmap_region(struct mm_struct *mm,
154 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) in __vm_enough_memory() argument
222 if (mm) { in __vm_enough_memory()
224 allowed -= min_t(long, mm->total_vm / 32, reserve); in __vm_enough_memory()
290 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1() local
294 down_write(&mm->mmap_sem); in SYSCALL_DEFINE1()
303 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
305 min_brk = mm->end_data; in SYSCALL_DEFINE1()
307 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
318 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, in SYSCALL_DEFINE1()
319 mm->end_data, mm->start_data)) in SYSCALL_DEFINE1()
323 oldbrk = PAGE_ALIGN(mm->brk); in SYSCALL_DEFINE1()
328 if (brk <= mm->brk) { in SYSCALL_DEFINE1()
329 if (!do_munmap(mm, newbrk, oldbrk-newbrk)) in SYSCALL_DEFINE1()
335 if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) in SYSCALL_DEFINE1()
343 mm->brk = brk; in SYSCALL_DEFINE1()
344 populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; in SYSCALL_DEFINE1()
345 up_write(&mm->mmap_sem); in SYSCALL_DEFINE1()
351 retval = mm->brk; in SYSCALL_DEFINE1()
352 up_write(&mm->mmap_sem); in SYSCALL_DEFINE1()
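
The entries above index the brk() system call handler: it validates the new break against RLIMIT_DATA, unmaps on shrink, refuses to grow over an existing mapping, and finally updates mm->brk under mmap_sem. As a point of reference, a minimal userspace sketch of what that path serves, assuming a Linux/glibc environment (the one-page growth is an arbitrary choice):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        void *before = sbrk(0);              /* read the current program break */

        if (brk((char *)before + page))      /* ask the kernel to move it up one page */
                return 1;

        printf("break: %p -> %p\n", before, sbrk(0));
        return 0;
}

Shrinking the break (passing an address below the current one) exercises the do_munmap() branch visible in the listing.
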
436 static void validate_mm(struct mm_struct *mm) in validate_mm() argument
441 struct vm_area_struct *vma = mm->mmap; in validate_mm()
458 if (i != mm->map_count) { in validate_mm()
459 pr_emerg("map_count %d vm_next %d\n", mm->map_count, i); in validate_mm()
462 if (highest_address != mm->highest_vm_end) { in validate_mm()
464 mm->highest_vm_end, highest_address); in validate_mm()
467 i = browse_rb(&mm->mm_rb); in validate_mm()
468 if (i != mm->map_count) { in validate_mm()
470 pr_emerg("map_count %d rb %d\n", mm->map_count, i); in validate_mm()
473 VM_BUG_ON_MM(bug, mm); in validate_mm()
477 #define validate_mm(mm) do { } while (0) argument
554 static int find_vma_links(struct mm_struct *mm, unsigned long addr, in find_vma_links() argument
560 __rb_link = &mm->mm_rb.rb_node; in find_vma_links()
588 static unsigned long count_vma_pages_range(struct mm_struct *mm, in count_vma_pages_range() argument
595 vma = find_vma_intersection(mm, addr, end); in count_vma_pages_range()
616 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link_rb() argument
623 mm->highest_vm_end = vma->vm_end; in __vma_link_rb()
637 vma_rb_insert(vma, &mm->mm_rb); in __vma_link_rb()
660 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link() argument
664 __vma_link_list(mm, vma, prev, rb_parent); in __vma_link()
665 __vma_link_rb(mm, vma, rb_link, rb_parent); in __vma_link()
668 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in vma_link() argument
679 __vma_link(mm, vma, prev, rb_link, rb_parent); in vma_link()
685 mm->map_count++; in vma_link()
686 validate_mm(mm); in vma_link()
693 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in __insert_vm_struct() argument
698 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in __insert_vm_struct()
701 __vma_link(mm, vma, prev, rb_link, rb_parent); in __insert_vm_struct()
702 mm->map_count++; in __insert_vm_struct()
706 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_unlink() argument
711 vma_rb_erase(vma, &mm->mm_rb); in __vma_unlink()
717 vmacache_invalidate(mm); in __vma_unlink()
730 struct mm_struct *mm = vma->vm_mm; in vma_adjust() local
854 __vma_unlink(mm, next, vma); in vma_adjust()
863 __insert_vm_struct(mm, insert); in vma_adjust()
869 mm->highest_vm_end = end; in vma_adjust()
898 mm->map_count--; in vma_adjust()
912 mm->highest_vm_end = end; in vma_adjust()
917 validate_mm(mm); in vma_adjust()
1040 struct vm_area_struct *vma_merge(struct mm_struct *mm, in vma_merge() argument
1061 next = mm->mmap; in vma_merge()
1216 void vm_stat_account(struct mm_struct *mm, unsigned long flags, in vm_stat_account() argument
1222 mm->total_vm += pages; in vm_stat_account()
1225 mm->shared_vm += pages; in vm_stat_account()
1227 mm->exec_vm += pages; in vm_stat_account()
1229 mm->stack_vm += pages; in vm_stat_account()
1246 static inline int mlock_future_check(struct mm_struct *mm, in mlock_future_check() argument
1255 locked += mm->locked_vm; in mlock_future_check()
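
mlock_future_check() adds the request to mm->locked_vm and weighs the total against the caller's RLIMIT_MEMLOCK; do_mmap() runs it for VM_LOCKED requests, as the next group of entries shows. A small userspace probe of that limit, offered as a sketch only (Linux assumed; MAP_LOCKED is the mmap-time way to ask for VM_LOCKED):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
                printf("RLIMIT_MEMLOCK soft limit: %llu bytes\n",
                       (unsigned long long)rl.rlim_cur);

        /* A locked anonymous mapping; over the limit this is refused,
         * typically with EAGAIN (or EPERM when mlock is not allowed at all). */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
        if (p == MAP_FAILED)
                printf("locked mapping refused: %s\n", strerror(errno));
        else
                munmap(p, 4096);
        return 0;
}
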
1272 struct mm_struct *mm = current->mm; in do_mmap() local
1302 if (mm->map_count > sysctl_max_map_count) in do_mmap()
1317 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; in do_mmap()
1323 if (mlock_future_check(mm, vm_flags, len)) in do_mmap()
1544 struct mm_struct *mm = current->mm; in mmap_region() local
1551 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) { in mmap_region()
1561 nr_pages = count_vma_pages_range(mm, addr, addr + len); in mmap_region()
1563 if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages)) in mmap_region()
1568 while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, in mmap_region()
1570 if (do_munmap(mm, addr, len)) in mmap_region()
1579 if (security_vm_enough_memory_mm(mm, charged)) in mmap_region()
1587 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, in mmap_region()
1603 vma->vm_mm = mm; in mmap_region()
1650 vma_link(mm, vma, prev, rb_link, rb_parent); in mmap_region()
1662 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); in mmap_region()
1665 vma == get_gate_vma(current->mm))) in mmap_region()
1666 mm->locked_vm += (len >> PAGE_SHIFT); in mmap_region()
1692 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); in mmap_region()
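
do_mmap() and mmap_region() are the core of mmap(2): sanity and rlimit checks (sysctl_max_map_count, may_expand_vm, security_vm_enough_memory_mm), an attempted vma_merge() with a neighbour, and otherwise a fresh vm_area_struct linked in with vma_link(). A minimal userspace exercise of that path, as a sketch (Linux assumed; sizes are arbitrary):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        size_t len = 2 * page;

        /* Anonymous private mapping: serviced by the do_mmap()/mmap_region()
         * path indexed above, which creates and links a new VMA. */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        memcpy(p, "hello", 6);
        printf("mapped %zu bytes at %p: %s\n", len, (void *)p, p);
        munmap(p, len);
        return 0;
}

Because vma_merge() is tried first, two adjacent anonymous mappings created with identical flags may show up as a single range in /proc/self/maps rather than two.
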
1717 struct mm_struct *mm = current->mm; in unmapped_area() local
1736 if (RB_EMPTY_ROOT(&mm->mm_rb)) in unmapped_area()
1738 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area()
1791 gap_start = mm->highest_vm_end; in unmapped_area()
1811 struct mm_struct *mm = current->mm; in unmapped_area_topdown() local
1834 gap_start = mm->highest_vm_end; in unmapped_area_topdown()
1839 if (RB_EMPTY_ROOT(&mm->mm_rb)) in unmapped_area_topdown()
1841 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area_topdown()
1923 struct mm_struct *mm = current->mm; in arch_get_unmapped_area() local
1935 vma = find_vma(mm, addr); in arch_get_unmapped_area()
1943 info.low_limit = mm->mmap_base; in arch_get_unmapped_area()
1961 struct mm_struct *mm = current->mm; in arch_get_unmapped_area_topdown() local
1975 vma = find_vma(mm, addr); in arch_get_unmapped_area_topdown()
1984 info.high_limit = mm->mmap_base; in arch_get_unmapped_area_topdown()
2021 get_area = current->mm->get_unmapped_area; in get_unmapped_area()
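
get_unmapped_area() and the unmapped_area()/arch_get_unmapped_area*() helpers above choose where a new mapping goes; an address passed to mmap() without MAP_FIXED is only a hint that these routines may override. A small sketch of that behaviour (Linux assumed; the hint value is arbitrary):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        /* Without MAP_FIXED the kernel treats addr as advisory and falls
         * back to its own search when the hint is unusable, so the
         * returned address must always be checked. */
        void *hint = (void *)0x70000000;
        void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        printf("asked for %p, got %p\n", hint, p);
        munmap(p, 4096);
        return 0;
}
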
2041 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) in find_vma() argument
2047 vma = vmacache_find(mm, addr); in find_vma()
2051 rb_node = mm->mm_rb.rb_node; in find_vma()
2078 find_vma_prev(struct mm_struct *mm, unsigned long addr, in find_vma_prev() argument
2083 vma = find_vma(mm, addr); in find_vma_prev()
2087 struct rb_node *rb_node = mm->mm_rb.rb_node; in find_vma_prev()
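
find_vma() returns the first VMA whose vm_end lies above the given address (which therefore may or may not contain it), consulting the per-thread vmacache before walking mm->mm_rb. There is no direct userspace equivalent, but /proc/self/maps exposes the same ordered VMA list; a rough analogue, as a sketch only:

#include <stdio.h>

/* First mapping whose end is above addr, mirroring find_vma() semantics. */
static void find_mapping(unsigned long addr)
{
        FILE *f = fopen("/proc/self/maps", "r");
        char line[512];

        if (!f)
                return;
        while (fgets(line, sizeof(line), f)) {
                unsigned long start, end;

                if (sscanf(line, "%lx-%lx", &start, &end) == 2 && addr < end) {
                        printf("0x%lx -> %s", addr, line);
                        break;
                }
        }
        fclose(f);
}

int main(void)
{
        int on_stack;

        find_mapping((unsigned long)&on_stack);
        return 0;
}
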
2104 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth() local
2109 if (!may_expand_vm(mm, grow)) in acct_stack_growth()
2123 locked = mm->locked_vm + grow; in acct_stack_growth()
2140 if (security_vm_enough_memory_mm(mm, grow)) in acct_stack_growth()
2153 struct mm_struct *mm = vma->vm_mm; in expand_upwards() local
2198 spin_lock(&mm->page_table_lock); in expand_upwards()
2200 mm->locked_vm += grow; in expand_upwards()
2201 vm_stat_account(mm, vma->vm_flags, in expand_upwards()
2209 mm->highest_vm_end = address; in expand_upwards()
2210 spin_unlock(&mm->page_table_lock); in expand_upwards()
2218 validate_mm(mm); in expand_upwards()
2229 struct mm_struct *mm = vma->vm_mm; in expand_downwards() local
2270 spin_lock(&mm->page_table_lock); in expand_downwards()
2272 mm->locked_vm += grow; in expand_downwards()
2273 vm_stat_account(mm, vma->vm_flags, in expand_downwards()
2280 spin_unlock(&mm->page_table_lock); in expand_downwards()
2288 validate_mm(mm); in expand_downwards()
2318 find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
2323 vma = find_vma_prev(mm, addr, &prev); in find_extend_vma()
2347 find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
2353 vma = find_vma(mm, addr); in find_extend_vma()
2377 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) in remove_vma_list() argument
2382 update_hiwater_vm(mm); in remove_vma_list()
2388 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); in remove_vma_list()
2392 validate_mm(mm); in remove_vma_list()
2400 static void unmap_region(struct mm_struct *mm, in unmap_region() argument
2404 struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap; in unmap_region()
2408 tlb_gather_mmu(&tlb, mm, start, end); in unmap_region()
2409 update_hiwater_rss(mm); in unmap_region()
2421 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, in detach_vmas_to_be_unmapped() argument
2427 insertion_point = (prev ? &prev->vm_next : &mm->mmap); in detach_vmas_to_be_unmapped()
2430 vma_rb_erase(vma, &mm->mm_rb); in detach_vmas_to_be_unmapped()
2431 mm->map_count--; in detach_vmas_to_be_unmapped()
2440 mm->highest_vm_end = prev ? prev->vm_end : 0; in detach_vmas_to_be_unmapped()
2444 vmacache_invalidate(mm); in detach_vmas_to_be_unmapped()
2451 static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in __split_vma() argument
2518 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
2521 if (mm->map_count >= sysctl_max_map_count) in split_vma()
2524 return __split_vma(mm, vma, addr, new_below); in split_vma()
2532 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) in do_munmap() argument
2545 vma = find_vma(mm, start); in do_munmap()
2571 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) in do_munmap()
2574 error = __split_vma(mm, vma, start, 0); in do_munmap()
2581 last = find_vma(mm, end); in do_munmap()
2583 int error = __split_vma(mm, last, end, 1); in do_munmap()
2587 vma = prev ? prev->vm_next : mm->mmap; in do_munmap()
2592 if (mm->locked_vm) { in do_munmap()
2596 mm->locked_vm -= vma_pages(tmp); in do_munmap()
2606 detach_vmas_to_be_unmapped(mm, vma, prev, end); in do_munmap()
2607 unmap_region(mm, vma, prev, start, end); in do_munmap()
2609 arch_unmap(mm, vma, start, end); in do_munmap()
2612 remove_vma_list(mm, vma); in do_munmap()
2620 struct mm_struct *mm = current->mm; in vm_munmap() local
2622 down_write(&mm->mmap_sem); in vm_munmap()
2623 ret = do_munmap(mm, start, len); in vm_munmap()
2624 up_write(&mm->mmap_sem); in vm_munmap()
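
do_munmap() handles an arbitrary [start, end) range: when the range cuts through the middle of a VMA it first calls __split_vma() (bounded by sysctl_max_map_count), then detaches and unmaps the affected VMAs; vm_munmap() is the same operation wrapped in down_write(&mm->mmap_sem). A userspace sketch that forces such a split, assuming Linux (the three-page layout is arbitrary):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        /* Punch a hole in the middle page: do_munmap() must split the
         * single three-page VMA before it can remove the middle one. */
        if (munmap(p + page, page)) {
                perror("munmap");
                return 1;
        }
        printf("kept [%p,%p) and [%p,%p)\n",
               (void *)p, (void *)(p + page),
               (void *)(p + 2 * page), (void *)(p + 3 * page));
        return 0;
}
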
2643 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE5() local
2665 down_write(&mm->mmap_sem); in SYSCALL_DEFINE5()
2666 vma = find_vma(mm, start); in SYSCALL_DEFINE5()
2720 up_write(&mm->mmap_sem); in SYSCALL_DEFINE5()
2728 static inline void verify_mm_writelocked(struct mm_struct *mm) in verify_mm_writelocked() argument
2731 if (unlikely(down_read_trylock(&mm->mmap_sem))) { in verify_mm_writelocked()
2733 up_read(&mm->mmap_sem); in verify_mm_writelocked()
2745 struct mm_struct *mm = current->mm; in do_brk() local
2756 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; in do_brk()
2762 error = mlock_future_check(mm, mm->def_flags, len); in do_brk()
2770 verify_mm_writelocked(mm); in do_brk()
2775 while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, in do_brk()
2777 if (do_munmap(mm, addr, len)) in do_brk()
2782 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) in do_brk()
2785 if (mm->map_count > sysctl_max_map_count) in do_brk()
2788 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) in do_brk()
2792 vma = vma_merge(mm, prev, addr, addr + len, flags, in do_brk()
2807 vma->vm_mm = mm; in do_brk()
2813 vma_link(mm, vma, prev, rb_link, rb_parent); in do_brk()
2816 mm->total_vm += len >> PAGE_SHIFT; in do_brk()
2818 mm->locked_vm += (len >> PAGE_SHIFT); in do_brk()
2825 struct mm_struct *mm = current->mm; in vm_brk() local
2829 down_write(&mm->mmap_sem); in vm_brk()
2831 populate = ((mm->def_flags & VM_LOCKED) != 0); in vm_brk()
2832 up_write(&mm->mmap_sem); in vm_brk()
2840 void exit_mmap(struct mm_struct *mm) in exit_mmap() argument
2847 mmu_notifier_release(mm); in exit_mmap()
2849 if (mm->locked_vm) { in exit_mmap()
2850 vma = mm->mmap; in exit_mmap()
2858 arch_exit_mmap(mm); in exit_mmap()
2860 vma = mm->mmap; in exit_mmap()
2865 flush_cache_mm(mm); in exit_mmap()
2866 tlb_gather_mmu(&tlb, mm, 0, -1); in exit_mmap()
2890 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in insert_vm_struct() argument
2895 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in insert_vm_struct()
2899 security_vm_enough_memory_mm(mm, vma_pages(vma))) in insert_vm_struct()
2919 vma_link(mm, vma, prev, rb_link, rb_parent); in insert_vm_struct()
2933 struct mm_struct *mm = vma->vm_mm; in copy_vma() local
2947 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) in copy_vma()
2949 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
2991 vma_link(mm, new_vma, prev, rb_link, rb_parent); in copy_vma()
3008 int may_expand_vm(struct mm_struct *mm, unsigned long npages) in may_expand_vm() argument
3010 unsigned long cur = mm->total_vm; /* pages */ in may_expand_vm()
3072 struct mm_struct *mm, in __install_special_mapping() argument
3085 vma->vm_mm = mm; in __install_special_mapping()
3089 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; in __install_special_mapping()
3095 ret = insert_vm_struct(mm, vma); in __install_special_mapping()
3099 mm->total_vm += len >> PAGE_SHIFT; in __install_special_mapping()
3120 struct mm_struct *mm, in _install_special_mapping() argument
3124 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, in _install_special_mapping()
3128 int install_special_mapping(struct mm_struct *mm, in install_special_mapping() argument
3133 mm, addr, len, vm_flags, (void *)pages, in install_special_mapping()
3141 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) in vm_lock_anon_vma() argument
3148 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem); in vm_lock_anon_vma()
3164 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) in vm_lock_mapping() argument
3178 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem); in vm_lock_mapping()
3213 int mm_take_all_locks(struct mm_struct *mm) in mm_take_all_locks() argument
3218 BUG_ON(down_read_trylock(&mm->mmap_sem)); in mm_take_all_locks()
3222 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3226 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3229 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3234 vm_lock_anon_vma(mm, avc->anon_vma); in mm_take_all_locks()
3240 mm_drop_all_locks(mm); in mm_take_all_locks()
3284 void mm_drop_all_locks(struct mm_struct *mm) in mm_drop_all_locks() argument
3289 BUG_ON(down_read_trylock(&mm->mmap_sem)); in mm_drop_all_locks()
3292 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_drop_all_locks()