Lines matching refs: mm (identifier cross-reference output; judging by kobjsize(), mm->context.end_brk and the task/mm-taking get_user_pages() variants, the matches appear to come from the Linux kernel's no-MMU memory manager, mm/nommu.c, from before the 4.6 get_user_pages() signature change)

123 		vma = find_vma(current->mm, (unsigned long)objp);  in kobjsize()
135 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, in __get_user_pages() argument
153 vma = find_vma(mm, start); in __get_user_pages()
185 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages() argument
197 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, in get_user_pages()
202 long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages_locked() argument
207 return get_user_pages(tsk, mm, start, nr_pages, write, force, in get_user_pages_locked()
212 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, in __get_user_pages_unlocked() argument
218 down_read(&mm->mmap_sem); in __get_user_pages_unlocked()
219 ret = get_user_pages(tsk, mm, start, nr_pages, write, force, in __get_user_pages_unlocked()
221 up_read(&mm->mmap_sem); in __get_user_pages_unlocked()
226 long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages_unlocked() argument
230 return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write, in get_user_pages_unlocked()
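
The five entry points above (lines 135-230) are the get_user_pages() family from the era when callers still passed an explicit task and mm. The locked/unlocked split is purely about who takes mmap_sem. A minimal sketch of the wrapper visible at lines 212-221, assuming the eight-argument get_user_pages() signature shown above (the sketch_ prefix marks it as an illustration, not the in-tree function):

long sketch_get_user_pages_unlocked(struct task_struct *tsk,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long nr_pages,
				    int write, int force, struct page **pages)
{
	long ret;

	/* the "unlocked" variant takes mmap_sem itself, ... */
	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
			     pages, NULL);
	/* ... so the caller must not already hold it */
	up_read(&mm->mmap_sem);
	return ret;
}
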
283 down_write(&current->mm->mmap_sem); in vmalloc_user()
284 vma = find_vma(current->mm, (unsigned long)ret); in vmalloc_user()
287 up_write(&current->mm->mmap_sem); in vmalloc_user()
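
Lines 283-287 are the tail of vmalloc_user(): the VMA backing the fresh allocation is looked up with mmap_sem held for write so its flags can be updated. A sketch of that fragment, assuming (as in mainline no-MMU code) that the flag being set is VM_USERMAP:

	down_write(&current->mm->mmap_sem);
	vma = find_vma(current->mm, (unsigned long)ret);
	if (vma)
		vma->vm_flags |= VM_USERMAP;	/* permits later remapping to userspace */
	up_write(&current->mm->mmap_sem);
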
531 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1() local
533 if (brk < mm->start_brk || brk > mm->context.end_brk) in SYSCALL_DEFINE1()
534 return mm->brk; in SYSCALL_DEFINE1()
536 if (mm->brk == brk) in SYSCALL_DEFINE1()
537 return mm->brk; in SYSCALL_DEFINE1()
542 if (brk <= mm->brk) { in SYSCALL_DEFINE1()
543 mm->brk = brk; in SYSCALL_DEFINE1()
550 flush_icache_range(mm->brk, brk); in SYSCALL_DEFINE1()
551 return mm->brk = brk; in SYSCALL_DEFINE1()
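
Lines 531-551 cover almost the entire no-MMU brk() handler; reassembled into readable form, with the unmatched gaps filled in from the visible control flow:

SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	/* the heap may only move within the region reserved for it */
	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/* shrinking is always allowed and needs no further work */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Growing stays inside the pre-reserved region, so nothing is
	 * allocated here; the new bytes only need to be made coherent
	 * for instruction fetch before being handed out.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}
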
701 struct mm_struct *mm = vma->vm_mm; in protect_vma() local
704 protect_page(mm, start, flags); in protect_vma()
707 update_protections(mm); in protect_vma()
717 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) in add_vma_to_mm() argument
725 mm->map_count++; in add_vma_to_mm()
726 vma->vm_mm = mm; in add_vma_to_mm()
743 p = &mm->mm_rb.rb_node; in add_vma_to_mm()
770 rb_insert_color(&vma->vm_rb, &mm->mm_rb); in add_vma_to_mm()
777 __vma_link_list(mm, vma, prev, parent); in add_vma_to_mm()
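
add_vma_to_mm() (lines 717-777) keeps every VMA indexed twice: in the mm_rb red-black tree and in the address-ordered mm->mmap list that __vma_link_list() maintains. A simplified sketch of that double bookkeeping (the real function also updates page protections and breaks vm_start ties by vm_end):

static void sketch_add_vma_to_mm(struct mm_struct *mm,
				 struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev = NULL;
	struct rb_node **p, *parent = NULL;

	mm->map_count++;
	vma->vm_mm = mm;

	/* descend to the rbtree insertion point, ordered by vm_start */
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* keep the linked list in step with the tree */
	if (rb_prev(&vma->vm_rb))
		prev = rb_entry(rb_prev(&vma->vm_rb),
				struct vm_area_struct, vm_rb);
	__vma_link_list(mm, vma, prev, parent);
}

do_mmap() registers each new mapping exactly this way (line 1416), right after accounting its pages into current->mm->total_vm (line 1413).
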
787 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm() local
792 mm->map_count--; in delete_vma_from_mm()
796 vmacache_invalidate(mm); in delete_vma_from_mm()
813 rb_erase(&vma->vm_rb, &mm->mm_rb); in delete_vma_from_mm()
818 mm->mmap = vma->vm_next; in delete_vma_from_mm()
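
delete_vma_from_mm() (lines 787-818) is the inverse: drop the counter, invalidate the per-task lookup cache, then unhook the VMA from both index structures. A sketch (the real function also detaches file mappings; delete_vma() at line 827 then frees the VMA itself):

static void sketch_delete_vma_from_mm(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	mm->map_count--;
	vmacache_invalidate(mm);	/* cached pointers may now dangle */

	rb_erase(&vma->vm_rb, &mm->mm_rb);

	/* unlink from the address-ordered list */
	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;	/* vma was the list head */
	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}
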
827 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) in delete_vma() argument
841 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) in find_vma() argument
846 vma = vmacache_find(mm, addr); in find_vma()
852 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma()
869 struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
871 return find_vma(mm, addr); in find_extend_vma()
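
find_vma() (lines 841-852) tries the per-task VMA cache before falling back to a linear walk of the sorted list; find_extend_vma() (lines 869-871) degenerates to a plain find_vma() because stacks cannot grow without an MMU. A sketch of the lookup:

struct vm_area_struct *sketch_find_vma(struct mm_struct *mm,
				       unsigned long addr)
{
	struct vm_area_struct *vma;

	/* fast path: per-task cache of recently used VMAs */
	vma = vmacache_find(mm, addr);
	if (vma)
		return vma;

	/* slow path: linear scan of the address-ordered list */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;	/* sorted list: we walked past addr */
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}
	return NULL;
}

find_vma_exact() (lines 887-901) repeats the same cache-then-walk shape but accepts only a VMA whose start and end both match.
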
887 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, in find_vma_exact() argument
895 vma = vmacache_find_exact(mm, addr, end); in find_vma_exact()
901 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma_exact()
1413 current->mm->total_vm += len >> PAGE_SHIFT; in do_mmap()
1416 add_vma_to_mm(current->mm, vma); in do_mmap()
1512 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
1524 if (mm->map_count >= sysctl_max_map_count) in split_vma()
1567 add_vma_to_mm(mm, vma); in split_vma()
1568 add_vma_to_mm(mm, new); in split_vma()
1576 static int shrink_vma(struct mm_struct *mm, in shrink_vma() argument
1589 add_vma_to_mm(mm, vma); in shrink_vma()
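
split_vma() (lines 1512-1568) refuses to push a process past sysctl_max_map_count, clones the VMA, gives each copy one side of the split point, and re-registers both halves; shrink_vma() (lines 1576-1589) merely trims one end and re-adds the result. A heavily simplified sketch of the split (the real function also splits the backing vm_region and unhooks the old VMA before re-adding it):

int sketch_split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
		     unsigned long addr, int new_below)
{
	struct vm_area_struct *new;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	new = kmemdup(vma, sizeof(*new), GFP_KERNEL);	/* simplified clone */
	if (!new)
		return -ENOMEM;

	/* each half keeps one side of the split point */
	if (new_below) {
		new->vm_end = addr;
		vma->vm_start = addr;
	} else {
		new->vm_start = addr;
		vma->vm_end = addr;
	}

	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}
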
1615 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) in do_munmap() argument
1628 vma = find_vma(mm, start); in do_munmap()
1661 ret = split_vma(mm, vma, start, 1); in do_munmap()
1665 return shrink_vma(mm, vma, start, end); in do_munmap()
1670 delete_vma(mm, vma); in do_munmap()
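
do_munmap() (lines 1615-1670) distinguishes three cases: the range covers a whole VMA (delete it), sits strictly inside one (split, then shrink), or trims one end (shrink only). A sketch of that control flow, eliding the real function's alignment and shared-mapping checks:

int sketch_do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long end = start + len;
	int ret;

	vma = find_vma(mm, start);
	if (!vma)
		return -EINVAL;

	if (start == vma->vm_start && end == vma->vm_end) {
		/* the whole VMA goes away */
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		return 0;
	}

	/* punching a hole in the middle needs a split first */
	if (start > vma->vm_start && end < vma->vm_end) {
		ret = split_vma(mm, vma, start, 1);
		if (ret < 0)
			return ret;
	}
	return shrink_vma(mm, vma, start, end);
}

vm_munmap() (lines 1677-1682) is the convenience wrapper that takes mmap_sem for write around this call.
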
1677 struct mm_struct *mm = current->mm; in vm_munmap() local
1680 down_write(&mm->mmap_sem); in vm_munmap()
1681 ret = do_munmap(mm, addr, len); in vm_munmap()
1682 up_write(&mm->mmap_sem); in vm_munmap()
1695 void exit_mmap(struct mm_struct *mm) in exit_mmap() argument
1699 if (!mm) in exit_mmap()
1702 mm->total_vm = 0; in exit_mmap()
1704 while ((vma = mm->mmap)) { in exit_mmap()
1705 mm->mmap = vma->vm_next; in exit_mmap()
1707 delete_vma(mm, vma); in exit_mmap()
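
exit_mmap() (lines 1695-1707) tears the whole address space down by popping VMAs off the head of the list. A sketch; the unmatched line between 1705 and 1707 is presumably the delete_vma_from_mm() call, since it contains no direct reference to the mm variable:

void sketch_exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	mm->total_vm = 0;

	/* pop VMAs off the list head until none remain */
	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);	/* presumed line 1706 */
		delete_vma(mm, vma);
	}
}
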
1745 vma = find_vma_exact(current->mm, addr, old_len); in do_mremap()
1769 down_write(&current->mm->mmap_sem); in SYSCALL_DEFINE5()
1771 up_write(&current->mm->mmap_sem); in SYSCALL_DEFINE5()
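
do_mremap() (line 1745) looks the mapping up with find_vma_exact() because, without an MMU, a region can only be resized in place, never moved; the syscall wrapper at lines 1769-1771 serializes it under mmap_sem. The wrapper, assuming the standard five-argument mremap signature:

SYSCALL_DEFINE5(mremap,
		unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
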
1848 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) in __vm_enough_memory() argument
1912 if (mm) { in __vm_enough_memory()
1914 allowed -= min_t(long, mm->total_vm / 32, reserve); in __vm_enough_memory()
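
__vm_enough_memory() (lines 1848-1914) implements the overcommit accounting; the per-mm fragment shown holds back headroom so a single process cannot eat the whole admin/user reserve. Roughly, in context (assuming "allowed" is the page budget computed by the overcommit policy and "reserve" is the user reserve expressed in pages):

	/* cap each process's bite of the reserve at 1/32 of its
	 * currently mapped size */
	if (mm)
		allowed -= min_t(long, mm->total_vm / 32, reserve);

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;	/* enough memory: commit granted */

	return -ENOMEM;
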
1939 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, in __access_remote_vm() argument
1944 down_read(&mm->mmap_sem); in __access_remote_vm()
1947 vma = find_vma(mm, addr); in __access_remote_vm()
1966 up_read(&mm->mmap_sem); in __access_remote_vm()
1981 int access_remote_vm(struct mm_struct *mm, unsigned long addr, in access_remote_vm() argument
1984 return __access_remote_vm(NULL, mm, addr, buf, len, write); in access_remote_vm()
1993 struct mm_struct *mm; in access_process_vm() local
1998 mm = get_task_mm(tsk); in access_process_vm()
1999 if (!mm) in access_process_vm()
2002 len = __access_remote_vm(tsk, mm, addr, buf, len, write); in access_process_vm()
2004 mmput(mm); in access_process_vm()
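
Finally, the remote-access trio (lines 1939-2004): __access_remote_vm() does the work under a read-held mmap_sem, access_remote_vm() is the thin mm-based wrapper (passing a NULL task), and access_process_vm() resolves a task to its mm first. A sketch of the task-based entry point, matching the get_task_mm()/mmput() pairing visible above:

int sketch_access_process_vm(struct task_struct *tsk, unsigned long addr,
			     void *buf, int len, int write)
{
	struct mm_struct *mm;

	mm = get_task_mm(tsk);	/* takes a reference; NULL for kernel threads */
	if (!mm)
		return 0;

	/* the inner helper takes mmap_sem for read, find_vma()s the
	 * address, and copies to or from the mapping (lines 1944-1966) */
	len = __access_remote_vm(tsk, mm, addr, buf, len, write);

	mmput(mm);	/* drop the reference */
	return len;
}
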