Cross-reference of dst_pmd: every declaration and use site, grouped by file
(line numbers refer to positions within each file).

include/linux/huge_mm.h
    12:	pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
include/linux/shmem_fs.h
   126:	extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
   132:	pmd_t *dst_pmd,
   138:	#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
mm/huge_memory.c
   979:	pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
   996:	dst_ptl = pmd_lock(dst_mm, dst_pmd);
  1017:	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
  1018:	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
  1041:	set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
  1053:	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
  1057:	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
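The mm/huge_memory.c hits (979-1057) are all inside copy_huge_pmd(): each path
that populates the destination pmd first deposits a preallocated pte page under
the pmd lock, then publishes the entry with set_pmd_at(). A minimal sketch of
that install step, assuming the caller has already built the huge pmd value and
allocated the pgtable; install_copied_huge_pmd() is a hypothetical name, not
kernel code:

#include <linux/mm.h>
#include <asm/pgtable.h>

static void install_copied_huge_pmd(struct mm_struct *dst_mm,
				    unsigned long addr, pmd_t *dst_pmd,
				    pmd_t pmd, pgtable_t pgtable)
{
	spinlock_t *dst_ptl = pmd_lock(dst_mm, dst_pmd);

	/* Deposit first: a later split of this huge pmd must not fail. */
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	spin_unlock(dst_ptl);
}

Deposit-before-set is the invariant worth noticing: once the huge pmd is
visible, a split can consume the stashed pte page without allocating.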
mm/memory.c
   785:	pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
   798:	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
   851:	pmd_t *src_pmd, *dst_pmd;
   854:	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
   855:	if (!dst_pmd)
   865:	dst_pmd, src_pmd, addr, vma);
   874:	if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
   877:	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
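mm/memory.c:851-877 is the body of copy_pmd_range() in the fork path:
pmd_alloc() materializes the destination pmd page, then source and destination
pmds advance in lockstep one pmd_addr_end() step at a time, with copy_huge_pmd()
(line 865) or copy_pte_range() (line 874) handling each populated entry. A
simplified sketch of that loop shape; walk_pmd_range_sketch() is a made-up name
and error handling is trimmed:

#include <linux/mm.h>
#include <asm/pgtable.h>

static int walk_pmd_range_sketch(struct mm_struct *dst_mm,
				 struct mm_struct *src_mm,
				 pud_t *dst_pud, pud_t *src_pud,
				 unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);	/* may allocate a pmd page */
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);		/* clamp to the pmd boundary */
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		/* copy_huge_pmd()/copy_pte_range() would run here */
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}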
mm/shmem.c
  2309:	pmd_t *dst_pmd,
  2396:	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
  2441:	pmd_t *dst_pmd,
  2447:	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
  2452:	pmd_t *dst_pmd,
  2458:	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
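mm/shmem.c:2309-2458 is shmem_mfill_atomic_pte() and its two wrappers,
shmem_mcopy_atomic_pte() and shmem_mfill_zeropage_pte(), which both visibly just
forward to it. The pte itself goes in under pte_offset_map_lock(), the same
pattern as mm/userfaultfd.c:76 and 125. A hedged sketch of that shared install
step; install_pte_sketch() and the ready-made 'entry' are illustrative only:

#include <linux/mm.h>
#include <asm/pgtable.h>

static int install_pte_sketch(struct mm_struct *dst_mm, pmd_t *dst_pmd,
			      unsigned long dst_addr, pte_t entry)
{
	spinlock_t *ptl;
	pte_t *dst_pte;
	int ret = -EEXIST;

	/* Map and lock the pte slot that dst_pmd points at. */
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (pte_none(*dst_pte)) {
		/* Only ever fill an empty slot; never overwrite a mapping. */
		set_pte_at(dst_mm, dst_addr, dst_pte, entry);
		ret = 0;
	}
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}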
mm/userfaultfd.c
    22:	pmd_t *dst_pmd,
    76:	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
   113:	pmd_t *dst_pmd,
   125:	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
   394:	pmd_t *dst_pmd,
   415:	err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
   418:	err = mfill_zeropage_pte(dst_mm, dst_pmd,
   422:	err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
   426:	err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
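mm/userfaultfd.c:394-426 is mfill_atomic_pte(), the dispatcher: it picks one of
the four workers above according to whether dst_vma is anonymous or
shmem-backed, and whether a zero-fill or a copy was requested. A sketch of that
routing, assuming a bool zeropage flag (the real code derives this from the
UFFDIO operation); note mcopy_atomic_pte() and mfill_zeropage_pte() are
file-local to mm/userfaultfd.c, so this sketch only links there:

#include <linux/mm.h>
#include <linux/shmem_fs.h>

static int mfill_dispatch_sketch(struct mm_struct *dst_mm, pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
				 struct page **pagep, bool zeropage)
{
	if (vma_is_anonymous(dst_vma)) {
		if (!zeropage)
			return mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
						dst_addr, src_addr, pagep);
		return mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, dst_addr);
	}
	/* Non-anonymous here means a shmem-backed destination vma. */
	if (!zeropage)
		return shmem_mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					      dst_addr, src_addr, pagep);
	return shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, dst_addr);
}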
mm/userfaultfd.c (continued)
   442:	pmd_t *dst_pmd;
   527:	dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
   528:	if (unlikely(!dst_pmd)) {
   533:	dst_pmdval = pmd_read_atomic(dst_pmd);
   543:	unlikely(__pte_alloc(dst_mm, dst_pmd))) {
   548:	if (unlikely(pmd_trans_huge(*dst_pmd))) {
   553:	BUG_ON(pmd_none(*dst_pmd));
   554:	BUG_ON(pmd_trans_huge(*dst_pmd));
   556:	err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
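Lines 442-556 are from __mcopy_atomic()'s per-address loop: look up or allocate
the pmd, snapshot it with pmd_read_atomic(), install a pte page if the pmd is
none, and bail out on any huge pmd, since mfill works strictly at pte
granularity (hence the two BUG_ONs before mfill_atomic_pte()). A condensed
sketch of that guard sequence; get_dst_pmd_sketch() is a made-up wrapper, the
specific error codes are an assumption, and mm_alloc_pmd() is the file-local
walker seen at line 527, so this too only makes sense inside that file:

#include <linux/mm.h>
#include <asm/pgtable.h>

static int get_dst_pmd_sketch(struct mm_struct *dst_mm,
			      unsigned long dst_addr, pmd_t **pmdp)
{
	pmd_t *dst_pmd;
	pmd_t dst_pmdval;

	dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);	/* pgd/pud/pmd walk */
	if (unlikely(!dst_pmd))
		return -ENOMEM;

	dst_pmdval = pmd_read_atomic(dst_pmd);		/* stable snapshot */
	/* Be strict: never operate on an existing huge pmd. */
	if (unlikely(pmd_trans_huge(dst_pmdval)))
		return -EEXIST;
	/* A none pmd needs a pte page before any pte can be installed. */
	if (unlikely(pmd_none(dst_pmdval)) &&
	    unlikely(__pte_alloc(dst_mm, dst_pmd)))
		return -ENOMEM;
	/* A huge pmd may still have materialized from under us. */
	if (unlikely(pmd_trans_huge(*dst_pmd)))
		return -EFAULT;

	*pmdp = dst_pmd;	/* now safe to hand to mfill_atomic_pte() */
	return 0;
}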