dst_vma            93 include/linux/hugetlb.h 				struct vm_area_struct *dst_vma,
dst_vma           186 include/linux/hugetlb.h #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
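
The two include/linux/hugetlb.h hits are the prototype (line 93) and the stub macro used when hugetlb support is compiled out (line 186) of hugetlb_mcopy_atomic_pte(). A sketch of what they likely look like; the full parameter list is inferred from the caller in mm/userfaultfd.c further down and from the definition in mm/hugetlb.c, so treat it as an approximation of the header text rather than a verbatim quote:

	int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     unsigned long src_addr,
				     struct page **pagep);

	/* without CONFIG_HUGETLB_PAGE the call collapses to a stub, roughly: */
	#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
					 src_addr, pagep)	({ BUG(); 0; })
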
dst_vma           127 include/linux/shmem_fs.h 				  struct vm_area_struct *dst_vma,
dst_vma           133 include/linux/shmem_fs.h 				    struct vm_area_struct *dst_vma,
dst_vma           136 include/linux/shmem_fs.h #define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
dst_vma           138 include/linux/shmem_fs.h #define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
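
The include/linux/shmem_fs.h hits are the prototypes of shmem_mcopy_atomic_pte() (line 127) and shmem_mfill_zeropage_pte() (line 133), plus the stub macros (lines 136, 138) used in configurations without the real shmem implementation. A sketch of the prototypes, with the parameter lists inferred from the wrapper definitions in mm/shmem.c listed below (an approximation, not the exact header text):

	extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
					  struct vm_area_struct *dst_vma,
					  unsigned long dst_addr,
					  unsigned long src_addr,
					  struct page **pagep);
	extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
					    struct vm_area_struct *dst_vma,
					    unsigned long dst_addr);
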
dst_vma          4262 mm/hugetlb.c   			    struct vm_area_struct *dst_vma,
dst_vma          4270 mm/hugetlb.c   	int vm_shared = dst_vma->vm_flags & VM_SHARED;
dst_vma          4271 mm/hugetlb.c   	struct hstate *h = hstate_vma(dst_vma);
dst_vma          4279 mm/hugetlb.c   		page = alloc_huge_page(dst_vma, dst_addr, 0);
dst_vma          4306 mm/hugetlb.c   	mapping = dst_vma->vm_file->f_mapping;
dst_vma          4307 mm/hugetlb.c   	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
dst_vma          4354 mm/hugetlb.c   		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
dst_vma          4357 mm/hugetlb.c   	_dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
dst_vma          4358 mm/hugetlb.c   	if (dst_vma->vm_flags & VM_WRITE)
dst_vma          4364 mm/hugetlb.c   	(void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
dst_vma          4365 mm/hugetlb.c   					dst_vma->vm_flags & VM_WRITE);
dst_vma          4369 mm/hugetlb.c   	update_mmu_cache(dst_vma, dst_addr, dst_pte);
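
The mm/hugetlb.c hits all fall inside hugetlb_mcopy_atomic_pte(), which installs the copied huge page into dst_vma: it reads VM_SHARED and the hstate from the VMA, allocates the huge page against it, derives the page-cache index from the VMA's file mapping, and finally builds and installs the huge PTE. A condensed, simplified sketch of that flow; allocation-failure handling, the copy from user space, page-cache insertion for shared mappings and the PTE-lock handling are omitted, and lines not present in the hits above (huge_pte_mkdirty(), set_huge_pte_at()) are filled in as assumptions:

	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	struct hstate *h = hstate_vma(dst_vma);

	/* allocate the destination huge page against dst_vma */
	page = alloc_huge_page(dst_vma, dst_addr, 0);

	/* shared mappings index the page by the VMA's file offset */
	mapping = dst_vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, dst_vma, dst_addr);

	if (!vm_shared)
		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);

	/* the new PTE inherits its protections from dst_vma */
	_dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = huge_pte_mkdirty(_dst_pte);
	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	(void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
					 dst_vma->vm_flags & VM_WRITE);
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
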
dst_vma          2310 mm/shmem.c     				  struct vm_area_struct *dst_vma,
dst_vma          2316 mm/shmem.c     	struct inode *inode = file_inode(dst_vma->vm_file);
dst_vma          2320 mm/shmem.c     	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
dst_vma          2366 mm/shmem.c     	offset = linear_page_index(dst_vma, dst_addr);
dst_vma          2382 mm/shmem.c     	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
dst_vma          2383 mm/shmem.c     	if (dst_vma->vm_flags & VM_WRITE)
dst_vma          2420 mm/shmem.c     	update_mmu_cache(dst_vma, dst_addr, dst_pte);
dst_vma          2442 mm/shmem.c     			   struct vm_area_struct *dst_vma,
dst_vma          2447 mm/shmem.c     	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
dst_vma          2453 mm/shmem.c     			     struct vm_area_struct *dst_vma,
dst_vma          2458 mm/shmem.c     	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
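
In mm/shmem.c, the hits at lines 2310-2420 sit inside the internal shmem_mfill_atomic_pte() helper, which uses dst_vma to find the backing inode (file_inode(dst_vma->vm_file)), the page offset within it (linear_page_index()), and the protections for the new PTE (mk_pte(page, dst_vma->vm_page_prot), made writable when VM_WRITE is set) before update_mmu_cache(). The hits at lines 2442-2458 are the two exported wrappers, which simply forward dst_vma. A sketch of those wrappers; the false/true flag and the trailing NULL that select copy versus zeropage behaviour are assumptions based on how the two cases differ:

	int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
				   struct vm_area_struct *dst_vma,
				   unsigned long dst_addr, unsigned long src_addr,
				   struct page **pagep)
	{
		/* copy case: hand the source address and page through */
		return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
					      dst_addr, src_addr, false, pagep);
	}

	int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
	{
		/* zeropage case: no source page is needed */
		return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
					      dst_addr, 0, true, NULL);
	}
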
dst_vma            23 mm/userfaultfd.c 			    struct vm_area_struct *dst_vma,
dst_vma            39 mm/userfaultfd.c 		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
dst_vma            72 mm/userfaultfd.c 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
dst_vma            73 mm/userfaultfd.c 	if (dst_vma->vm_flags & VM_WRITE)
dst_vma            77 mm/userfaultfd.c 	if (dst_vma->vm_file) {
dst_vma            79 mm/userfaultfd.c 		inode = dst_vma->vm_file->f_inode;
dst_vma            80 mm/userfaultfd.c 		offset = linear_page_index(dst_vma, dst_addr);
dst_vma            91 mm/userfaultfd.c 	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
dst_vma            93 mm/userfaultfd.c 	lru_cache_add_active_or_unevictable(page, dst_vma);
dst_vma            98 mm/userfaultfd.c 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
dst_vma           114 mm/userfaultfd.c 			      struct vm_area_struct *dst_vma,
dst_vma           124 mm/userfaultfd.c 					 dst_vma->vm_page_prot));
dst_vma           126 mm/userfaultfd.c 	if (dst_vma->vm_file) {
dst_vma           128 mm/userfaultfd.c 		inode = dst_vma->vm_file->f_inode;
dst_vma           129 mm/userfaultfd.c 		offset = linear_page_index(dst_vma, dst_addr);
dst_vma           140 mm/userfaultfd.c 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
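
Within mm/userfaultfd.c, the hits at lines 23-98 belong to mcopy_atomic_pte(), which fills a private anonymous destination, and those at lines 114-140 to mfill_zeropage_pte(), the UFFDIO_ZEROPAGE variant. Both derive the new PTE from dst_vma. A simplified sketch of that common step; set_pte_at(), the pte_mkwrite()/pte_mkdirty() combination and the copy from user space are not in the hits above and are filled in as assumptions, and the i_size check behind "if (dst_vma->vm_file)" is omitted:

	/* mcopy_atomic_pte(): map the freshly copied page into dst_vma */
	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	/* mfill_zeropage_pte(): map the shared zero page read-only */
	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
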
dst_vma           174 mm/userfaultfd.c 					      struct vm_area_struct *dst_vma,
dst_vma           180 mm/userfaultfd.c 	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
dst_vma           181 mm/userfaultfd.c 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
dst_vma           208 mm/userfaultfd.c 	vma_hpagesize = vma_kernel_pagesize(dst_vma);
dst_vma           222 mm/userfaultfd.c 	if (!dst_vma) {
dst_vma           224 mm/userfaultfd.c 		dst_vma = find_vma(dst_mm, dst_start);
dst_vma           225 mm/userfaultfd.c 		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
dst_vma           232 mm/userfaultfd.c 		if (!dst_vma->vm_userfaultfd_ctx.ctx)
dst_vma           235 mm/userfaultfd.c 		if (dst_start < dst_vma->vm_start ||
dst_vma           236 mm/userfaultfd.c 		    dst_start + len > dst_vma->vm_end)
dst_vma           240 mm/userfaultfd.c 		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
dst_vma           243 mm/userfaultfd.c 		vm_shared = dst_vma->vm_flags & VM_SHARED;
dst_vma           255 mm/userfaultfd.c 		if (unlikely(anon_vma_prepare(dst_vma)))
dst_vma           259 mm/userfaultfd.c 	h = hstate_vma(dst_vma);
dst_vma           270 mm/userfaultfd.c 		idx = linear_page_index(dst_vma, dst_addr);
dst_vma           271 mm/userfaultfd.c 		mapping = dst_vma->vm_file->f_mapping;
dst_vma           289 mm/userfaultfd.c 		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
dst_vma           310 mm/userfaultfd.c 			dst_vma = NULL;
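
The hits at lines 174-310 are __mcopy_atomic_hugetlb(), the hugetlb back end of UFFDIO_COPY. Because mmap_sem is dropped and retaken across the copy loop, dst_vma is reset to NULL (line 310) and revalidated on the next pass: it must be a hugetlb VMA registered with a userfaultfd context, the whole destination range must fit inside it, and its huge page size must match; private mappings also need anon_vma_prepare(). A sketch of that revalidation, following the hits above, with labels and error codes simplified:

	if (!dst_vma) {
		dst_vma = find_vma(dst_mm, dst_start);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;
		/* only VMAs registered with a userfaultfd context qualify */
		if (!dst_vma->vm_userfaultfd_ctx.ctx)
			goto out_unlock;
		/* the whole destination range must fit in this VMA */
		if (dst_start < dst_vma->vm_start ||
		    dst_start + len > dst_vma->vm_end)
			goto out_unlock;
		/* the huge page size must not have changed under us */
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;
		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

Each iteration then looks up the page-cache index and mapping from dst_vma (lines 270-271) and calls hugetlb_mcopy_atomic_pte() (line 289) to install one huge page.
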
dst_vma           386 mm/userfaultfd.c 				      struct vm_area_struct *dst_vma,
dst_vma           395 mm/userfaultfd.c 						struct vm_area_struct *dst_vma,
dst_vma           413 mm/userfaultfd.c 	if (!(dst_vma->vm_flags & VM_SHARED)) {
dst_vma           415 mm/userfaultfd.c 			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
dst_vma           419 mm/userfaultfd.c 						 dst_vma, dst_addr);
dst_vma           423 mm/userfaultfd.c 						     dst_vma, dst_addr,
dst_vma           427 mm/userfaultfd.c 						       dst_vma, dst_addr);
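
The hits at lines 386-427 are mfill_atomic_pte(), the per-PTE dispatcher for the non-hugetlb case: private (anonymous) destinations use the local helpers, shared shmem destinations use the shmem ones. A sketch of that dispatch as suggested by the hits above; the zeropage flag name is an assumption taken from the UFFDIO_ZEROPAGE semantics:

	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		if (!zeropage)
			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
						     dst_vma, dst_addr,
						     src_addr, page);
		else
			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
						       dst_vma, dst_addr);
	}
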
dst_vma           440 mm/userfaultfd.c 	struct vm_area_struct *dst_vma;
dst_vma           478 mm/userfaultfd.c 	dst_vma = find_vma(dst_mm, dst_start);
dst_vma           479 mm/userfaultfd.c 	if (!dst_vma)
dst_vma           486 mm/userfaultfd.c 	if (!dst_vma->vm_userfaultfd_ctx.ctx)
dst_vma           489 mm/userfaultfd.c 	if (dst_start < dst_vma->vm_start ||
dst_vma           490 mm/userfaultfd.c 	    dst_start + len > dst_vma->vm_end)
dst_vma           498 mm/userfaultfd.c 	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
dst_vma           499 mm/userfaultfd.c 	    dst_vma->vm_flags & VM_SHARED))
dst_vma           505 mm/userfaultfd.c 	if (is_vm_hugetlb_page(dst_vma))
dst_vma           506 mm/userfaultfd.c 		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
dst_vma           509 mm/userfaultfd.c 	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
dst_vma           518 mm/userfaultfd.c 	if (!(dst_vma->vm_flags & VM_SHARED) &&
dst_vma           519 mm/userfaultfd.c 	    unlikely(anon_vma_prepare(dst_vma)))
dst_vma           556 mm/userfaultfd.c 		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
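
Finally, the hits from line 440 onward are __mcopy_atomic(), the top-level entry reached from the UFFDIO_COPY and UFFDIO_ZEROPAGE ioctls: it looks up dst_vma, requires a userfaultfd-registered VMA that covers the whole range, hands hugetlb VMAs to __mcopy_atomic_hugetlb(), otherwise accepts only anonymous or shmem VMAs, prepares the anon_vma for private mappings, and then loops over the range calling mfill_atomic_pte(). A hypothetical user-space snippet showing how this path is typically exercised when resolving a fault on a registered range; uffd, fault_addr, src_buf and page_size are assumed to be set up by the caller:

	#include <linux/userfaultfd.h>
	#include <sys/ioctl.h>

	static int resolve_fault(int uffd, unsigned long fault_addr,
				 void *src_buf, size_t page_size)
	{
		struct uffdio_copy copy = {
			.dst  = fault_addr & ~(page_size - 1),
			.src  = (unsigned long)src_buf,
			.len  = page_size,
			.mode = 0,
		};

		/* ends up in __mcopy_atomic() with dst_vma describing
		 * the faulting mapping */
		return ioctl(uffd, UFFDIO_COPY, &copy);
	}
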