Lines matching refs:mm (references to 'mm'; each entry gives the line number, the source line, the enclosing function, and whether the match is a local variable or an argument)
64 struct mm_struct *mm = vma->vm_mm; in follow_page_pte() local
73 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); in follow_page_pte()
90 migration_entry_wait(mm, pmd, address); in follow_page_pte()
186 struct mm_struct *mm = vma->vm_mm; in follow_page_mask() local
190 page = follow_huge_addr(mm, address, flags & FOLL_WRITE); in follow_page_mask()
196 pgd = pgd_offset(mm, address); in follow_page_mask()
204 page = follow_huge_pud(mm, address, pud, flags); in follow_page_mask()
216 page = follow_huge_pmd(mm, address, pmd, flags); in follow_page_mask()
228 ptl = pmd_lock(mm, pmd); in follow_page_mask()
246 static int get_gate_page(struct mm_struct *mm, unsigned long address, in get_gate_page() argument
262 pgd = pgd_offset_gate(mm, address); in get_gate_page()
273 *vma = get_gate_vma(mm); in get_gate_page()
298 struct mm_struct *mm = vma->vm_mm; in faultin_page() local
321 ret = handle_mm_fault(mm, vma, address, fault_flags); in faultin_page()
453 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, in __get_user_pages() argument
482 vma = find_extend_vma(mm, start); in __get_user_pages()
483 if (!vma && in_gate_area(mm, start)) { in __get_user_pages()
485 ret = get_gate_page(mm, start & PAGE_MASK, in __get_user_pages()
497 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
587 int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, in fixup_user_fault() argument
594 vma = find_extend_vma(mm, address); in fixup_user_fault()
602 ret = handle_mm_fault(mm, vma, address, fault_flags); in fixup_user_fault()
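The fixup_user_fault() references above show the four-argument form of this era (task, mm, address, fault_flags) ending in a direct handle_mm_fault() call; the caller is expected to hold mmap_sem for read. A minimal sketch of that pattern, modelled on how the futex code faults in a user word for writing (the fault_in_writable_word() name is illustrative, not from this file):

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Sketch only: resolve a fault at a single user address so that a later
 * atomic access can succeed. Assumes the four-argument fixup_user_fault()
 * listed above and that mmap_sem must be held for read across the call.
 */
static int fault_in_writable_word(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}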
622 struct mm_struct *mm, in __get_user_pages_locked() argument
651 ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
690 down_read(&mm->mmap_sem); in __get_user_pages_locked()
691 ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, in __get_user_pages_locked()
711 up_read(&mm->mmap_sem); in __get_user_pages_locked()
738 long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages_locked() argument
743 return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, in get_user_pages_locked()
758 __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, in __get_user_pages_unlocked() argument
765 down_read(&mm->mmap_sem); in __get_user_pages_unlocked()
766 ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, in __get_user_pages_unlocked()
769 up_read(&mm->mmap_sem); in __get_user_pages_unlocked()
791 long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages_unlocked() argument
795 return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write, in get_user_pages_unlocked()
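get_user_pages_unlocked() (and the __get_user_pages_unlocked() helper above it) takes and drops mmap_sem internally, so the caller must not already hold it. A hedged sketch of pinning and later releasing a user buffer through it; the pin_user_buffer()/unpin_user_buffer() names are illustrative and assume the era's (tsk, mm, start, nr_pages, write, force, pages) signature shown above:

#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch only: pin nr_pages of a user buffer; caller must NOT hold mmap_sem. */
static long pin_user_buffer(unsigned long start, unsigned long nr_pages,
			    struct page **pages)
{
	return get_user_pages_unlocked(current, current->mm, start & PAGE_MASK,
				       nr_pages, 1 /* write */, 0 /* force */,
				       pages);
}

/* Drop the reference that get_user_pages_unlocked() took on each page. */
static void unpin_user_buffer(struct page **pages, long pinned)
{
	while (pinned > 0)
		put_page(pages[--pinned]);
}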
855 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages() argument
859 return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, in get_user_pages()
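get_user_pages() itself still has the long task/mm signature here and, unlike the unlocked variant, relies on the caller to hold mmap_sem for read around the call. A minimal sketch of that calling convention (pin_one_page() is a hypothetical helper, not part of the file):

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Sketch only: pin a single writable page of the current process. Assumes
 * the pre-gup_flags signature shown above: (tsk, mm, start, nr_pages,
 * write, force, pages, vmas). Returns 0 on success with *page held;
 * release it with put_page() when done.
 */
static int pin_one_page(unsigned long addr, struct page **page)
{
	struct mm_struct *mm = current->mm;
	long ret;

	down_read(&mm->mmap_sem);		/* required by get_user_pages() */
	ret = get_user_pages(current, mm, addr & PAGE_MASK, 1,
			     1 /* write */, 0 /* force */, page, NULL);
	up_read(&mm->mmap_sem);

	if (ret < 0)
		return ret;
	return ret == 1 ? 0 : -EFAULT;
}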
886 struct mm_struct *mm = vma->vm_mm; in populate_vma_page_range() local
894 VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); in populate_vma_page_range()
919 return __get_user_pages(current, mm, start, nr_pages, gup_flags, in populate_vma_page_range()
932 struct mm_struct *mm = current->mm; in __mm_populate() local
949 down_read(&mm->mmap_sem); in __mm_populate()
950 vma = find_vma(mm, nstart); in __mm_populate()
981 up_read(&mm->mmap_sem); in __mm_populate()
1005 if (__get_user_pages(current, current->mm, addr, 1, in get_dump_page()
1328 struct mm_struct *mm = current->mm; in __get_user_pages_fast() local
1356 pgdp = pgd_offset(mm, addr); in __get_user_pages_fast()
1398 struct mm_struct *mm = current->mm; in get_user_pages_fast() local
1410 ret = get_user_pages_unlocked(current, mm, start, in get_user_pages_fast()
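get_user_pages_fast() walks the page tables with interrupts disabled and without taking mmap_sem, and, as the call above shows, falls back to get_user_pages_unlocked() for anything the fast path could not pin. A hedged usage sketch assuming the era's (start, nr_pages, write, pages) signature; the map_user_read_buffer() name is illustrative:

#include <linux/mm.h>

/*
 * Sketch only: pin a user buffer that the kernel will write into
 * (write = 1). get_user_pages_fast() returns the number of pages pinned,
 * which may be fewer than requested; on partial success release what was
 * pinned with put_page() and report failure so the caller can fall back.
 */
static int map_user_read_buffer(unsigned long start, int nr_pages,
				struct page **pages)
{
	int pinned;

	pinned = get_user_pages_fast(start & PAGE_MASK, nr_pages,
				     1 /* write */, pages);
	if (pinned < 0)
		return pinned;
	if (pinned < nr_pages) {
		while (pinned > 0)
			put_page(pages[--pinned]);
		return -EFAULT;
	}
	return 0;
}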