dst_mm            311 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct drm_mm_node *src_mm, *dst_mm;
dst_mm            330 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
dst_mm            331 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
dst_mm            333 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
dst_mm            402 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
dst_mm            404 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			dst_node_size = (dst_mm->size << PAGE_SHIFT);
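Note: in the amdgpu_ttm.c hits above, dst_mm is a struct drm_mm_node * (one chunk of a VRAM/GTT allocation), not the struct mm_struct * that every other hit below refers to; the identifier match is a homonym. The pattern at lines 402-404 advances to the next chunk once the current one is consumed. A minimal sketch of that step, using the names from the listing (illustrative, not the driver's verbatim code):

	/* Destination node exhausted: step to the next drm_mm_node and
	 * recompute its start address and remaining size. */
	if (!dst_node_size) {
		dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
						     dst->mem);
		dst_node_size = dst_mm->size << PAGE_SHIFT;
	}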
dst_mm             11 include/linux/huge_mm.h extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
dst_mm             15 include/linux/huge_mm.h extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
dst_mm             92 include/linux/hugetlb.h int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
dst_mm            186 include/linux/hugetlb.h #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
dst_mm            126 include/linux/shmem_fs.h extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
dst_mm            131 include/linux/shmem_fs.h extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
dst_mm            136 include/linux/shmem_fs.h #define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
dst_mm            138 include/linux/shmem_fs.h #define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
dst_mm             35 include/linux/userfaultfd_k.h extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
dst_mm             38 include/linux/userfaultfd_k.h extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
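The header hits above are the userfaultfd fill API: the destination mm is always the first parameter, threaded from the ioctl handler down to the per-pte helpers. A sketch of the expected caller shape, assuming the UFFDIO_COPY handler in fs/userfaultfd.c of this era (the caller itself is not part of this listing):

	/* Assumed caller: the ioctl handler resolves the request and
	 * passes the target task's mm as dst_mm. */
	ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
			   uffdio_copy.len, &ctx->mmap_changing);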
dst_mm            978 mm/huge_memory.c int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
dst_mm            992 mm/huge_memory.c 	pgtable = pte_alloc_one(dst_mm);
dst_mm            996 mm/huge_memory.c 	dst_ptl = pmd_lock(dst_mm, dst_pmd);
dst_mm           1015 mm/huge_memory.c 		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
dst_mm           1016 mm/huge_memory.c 		mm_inc_nr_ptes(dst_mm);
dst_mm           1017 mm/huge_memory.c 		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
dst_mm           1018 mm/huge_memory.c 		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
dst_mm           1025 mm/huge_memory.c 		pte_free(dst_mm, pgtable);
dst_mm           1040 mm/huge_memory.c 		zero_page = mm_get_huge_zero_page(dst_mm);
dst_mm           1041 mm/huge_memory.c 		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
dst_mm           1051 mm/huge_memory.c 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
dst_mm           1052 mm/huge_memory.c 	mm_inc_nr_ptes(dst_mm);
dst_mm           1053 mm/huge_memory.c 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
dst_mm           1057 mm/huge_memory.c 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
dst_mm           1118 mm/huge_memory.c int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
dst_mm           1126 mm/huge_memory.c 	dst_ptl = pud_lock(dst_mm, dst_pud);
dst_mm           1146 mm/huge_memory.c 	set_pud_at(dst_mm, addr, dst_pud, pud);
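copy_huge_pmd() above charges the huge page to dst_mm, deposits a preallocated pte table for a later split, and installs the entry under the pmd locks. The fork semantics hinge on the write-protect step that precedes the set_pmd_at() hit at line 1057: parent and child both end up mapping the page read-only, so the first write faults and copies. A minimal sketch of that step:

	/* Share the huge page copy-on-write: write-protect the parent's
	 * entry, then install the same read-only entry for the child. */
	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);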
dst_mm           4260 mm/hugetlb.c   int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
dst_mm           4329 mm/hugetlb.c   	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
dst_mm           4362 mm/hugetlb.c   	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
dst_mm           4366 mm/hugetlb.c   	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
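hugetlb_mcopy_atomic_pte() installs a freshly filled huge page into dst_mm under the huge-pte spinlock and adjusts the per-mm hugetlb counter. Sketch of the locking shape around the hits at lines 4329-4366 (the validity rechecks between lock and install are real but elided here):

	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
	spin_lock(ptl);
	/* ...recheck that dst_pte is still empty before installing... */
	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
	spin_unlock(ptl);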
dst_mm            678 mm/memory.c    copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
dst_mm            695 mm/memory.c    			if (unlikely(list_empty(&dst_mm->mmlist))) {
dst_mm            697 mm/memory.c    				if (list_empty(&dst_mm->mmlist))
dst_mm            698 mm/memory.c    					list_add(&dst_mm->mmlist,
dst_mm            780 mm/memory.c    	set_pte_at(dst_mm, addr, dst_pte, pte);
dst_mm            784 mm/memory.c    static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
dst_mm            798 mm/memory.c    	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
dst_mm            823 mm/memory.c    		entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
dst_mm            833 mm/memory.c    	add_mm_rss_vec(dst_mm, rss);
dst_mm            847 mm/memory.c    static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
dst_mm            854 mm/memory.c    	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
dst_mm            864 mm/memory.c    			err = copy_huge_pmd(dst_mm, src_mm,
dst_mm            874 mm/memory.c    		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
dst_mm            881 mm/memory.c    static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
dst_mm            888 mm/memory.c    	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
dst_mm            898 mm/memory.c    			err = copy_huge_pud(dst_mm, src_mm,
dst_mm            908 mm/memory.c    		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
dst_mm            915 mm/memory.c    static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
dst_mm            922 mm/memory.c    	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
dst_mm            930 mm/memory.c    		if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
dst_mm            937 mm/memory.c    int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
dst_mm            959 mm/memory.c    		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
dst_mm            986 mm/memory.c    	dst_pgd = pgd_offset(dst_mm, addr);
dst_mm            992 mm/memory.c    		if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
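The memory.c hits are the fork-time page-table copy: copy_page_range() starts at the pgd, and each copy_*_range() level allocates the destination table in dst_mm before recursing one level down, bottoming out in copy_one_pte(). A condensed sketch of the outermost loop (shape of lines 986-992; error handling trimmed):

	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd,
					    src_pgd, vma, addr, next)))
			return -ENOMEM;
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);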
dst_mm           2308 mm/shmem.c     static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
dst_mm           2371 mm/shmem.c     	ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
dst_mm           2396 mm/shmem.c     	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
dst_mm           2415 mm/shmem.c     	inc_mm_counter(dst_mm, mm_counter_file(page));
dst_mm           2417 mm/shmem.c     	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
dst_mm           2440 mm/shmem.c     int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
dst_mm           2447 mm/shmem.c     	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
dst_mm           2451 mm/shmem.c     int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
dst_mm           2458 mm/shmem.c     	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
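Both exported shmem entry points (lines 2440 and 2451) are thin wrappers around shmem_mfill_atomic_pte(); only a zeropage flag and the source address differ. Sketch of the two wrapper bodies, with the argument order assumed from this era's signature:

	/* UFFDIO_COPY path: copy from src_addr. */
	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
				      dst_addr, src_addr, false, pagep);

	/* UFFDIO_ZEROPAGE path: no source page, zeropage = true. */
	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
				      dst_addr, 0, true, &page);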
dst_mm             21 mm/userfaultfd.c static int mcopy_atomic_pte(struct mm_struct *dst_mm,
dst_mm             69 mm/userfaultfd.c 	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
dst_mm             76 mm/userfaultfd.c 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
dst_mm             90 mm/userfaultfd.c 	inc_mm_counter(dst_mm, MM_ANONPAGES);
dst_mm             95 mm/userfaultfd.c 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
dst_mm            112 mm/userfaultfd.c static int mfill_zeropage_pte(struct mm_struct *dst_mm,
dst_mm            125 mm/userfaultfd.c 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
dst_mm            138 mm/userfaultfd.c 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
dst_mm            173 mm/userfaultfd.c static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
dst_mm            200 mm/userfaultfd.c 		up_read(&dst_mm->mmap_sem);
dst_mm            224 mm/userfaultfd.c 		dst_vma = find_vma(dst_mm, dst_start);
dst_mm            276 mm/userfaultfd.c 		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
dst_mm            289 mm/userfaultfd.c 		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
dst_mm            298 mm/userfaultfd.c 			up_read(&dst_mm->mmap_sem);
dst_mm            308 mm/userfaultfd.c 			down_read(&dst_mm->mmap_sem);
dst_mm            328 mm/userfaultfd.c 	up_read(&dst_mm->mmap_sem);
dst_mm            385 mm/userfaultfd.c extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
dst_mm            393 mm/userfaultfd.c static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
dst_mm            415 mm/userfaultfd.c 			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
dst_mm            418 mm/userfaultfd.c 			err = mfill_zeropage_pte(dst_mm, dst_pmd,
dst_mm            422 mm/userfaultfd.c 			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
dst_mm            426 mm/userfaultfd.c 			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
dst_mm            433 mm/userfaultfd.c static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
dst_mm            462 mm/userfaultfd.c 	down_read(&dst_mm->mmap_sem);
dst_mm            478 mm/userfaultfd.c 	dst_vma = find_vma(dst_mm, dst_start);
dst_mm            506 mm/userfaultfd.c 		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
dst_mm            527 mm/userfaultfd.c 		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
dst_mm            543 mm/userfaultfd.c 		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
dst_mm            556 mm/userfaultfd.c 		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
dst_mm            563 mm/userfaultfd.c 			up_read(&dst_mm->mmap_sem);
dst_mm            592 mm/userfaultfd.c 	up_read(&dst_mm->mmap_sem);
dst_mm            602 mm/userfaultfd.c ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
dst_mm            606 mm/userfaultfd.c 	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
dst_mm            610 mm/userfaultfd.c ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
dst_mm            613 mm/userfaultfd.c 	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing);
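The userfaultfd.c hits show the driving loop: __mcopy_atomic() takes dst_mm->mmap_sem for read, resolves the destination VMA, allocates the pmd, and calls mfill_atomic_pte() once per page. The lock drop at lines 556-563 exists because the copy helper cannot fault on the source while mmap_sem is held: on -ENOENT it releases the lock, fills a kernel-allocated page from userspace, then retakes the lock and retries. A condensed sketch (simplified; the real loop also revalidates mmap_changing after relocking):

	err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
			       src_addr, &page, zeropage);
	if (unlikely(err == -ENOENT)) {
		void *page_kaddr;

		up_read(&dst_mm->mmap_sem);
		page_kaddr = kmap(page);
		if (copy_from_user(page_kaddr,
				   (const void __user *)src_addr,
				   PAGE_SIZE)) {
			kunmap(page);
			err = -EFAULT;
			goto out;
		}
		kunmap(page);
		goto retry;	/* retake mmap_sem and redo this address */
	}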