vmf              1092 arch/mips/kvm/mips.c vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
vmf               211 arch/powerpc/kvm/book3s_64_vio.c static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
vmf               213 arch/powerpc/kvm/book3s_64_vio.c 	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
vmf               216 arch/powerpc/kvm/book3s_64_vio.c 	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
vmf               219 arch/powerpc/kvm/book3s_64_vio.c 	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
vmf               224 arch/powerpc/kvm/book3s_64_vio.c 	vmf->page = page;
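The book3s_64_vio.c hits above illustrate the simplest handler shape in this listing: bounds-check vmf->pgoff, look up the backing page, take a reference, and hand it back through vmf->page. A minimal sketch of that shape, assuming a hypothetical my_buf object holding a pages[] array:

#include <linux/mm.h>

struct my_buf {
	struct page **pages;		/* hypothetical backing store */
	unsigned long npages;
};

static vm_fault_t my_page_fault(struct vm_fault *vmf)
{
	struct my_buf *buf = vmf->vma->vm_file->private_data;

	if (vmf->pgoff >= buf->npages)
		return VM_FAULT_SIGBUS;	/* fault past the buffer */

	vmf->page = buf->pages[vmf->pgoff];
	get_page(vmf->page);		/* core MM drops this reference */
	return 0;			/* 0 means vmf->page is valid */
}

udmabuf, mspec, relay and several others in this listing follow the same shape.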
vmf               227 arch/powerpc/kvm/book3s_xive_native.c static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
vmf               229 arch/powerpc/kvm/book3s_xive_native.c 	struct vm_area_struct *vma = vmf->vma;
vmf               245 arch/powerpc/kvm/book3s_xive_native.c 	page_offset = vmf->pgoff - vma->vm_pgoff;
vmf               272 arch/powerpc/kvm/book3s_xive_native.c 	vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
vmf               280 arch/powerpc/kvm/book3s_xive_native.c static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf)
vmf               282 arch/powerpc/kvm/book3s_xive_native.c 	struct vm_area_struct *vma = vmf->vma;
vmf               284 arch/powerpc/kvm/book3s_xive_native.c 	switch (vmf->pgoff - vma->vm_pgoff) {
vmf               289 arch/powerpc/kvm/book3s_xive_native.c 		vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT);
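The XIVE handlers above show the other common shape: no struct page at all, just a PFN inserted straight into the page tables with vmf_insert_pfn() (the VMA is set up as VM_PFNMAP). A sketch, where mmio_base is a hypothetical physical window base:

#include <linux/mm.h>

static phys_addr_t mmio_base;		/* hypothetical MMIO window base */

static vm_fault_t my_mmio_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long page_offset = vmf->pgoff - vma->vm_pgoff;

	/* vmf_insert_pfn() returns VM_FAULT_NOPAGE on success */
	return vmf_insert_pfn(vma, vmf->address,
			      (mmio_base >> PAGE_SHIFT) + page_offset);
}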
vmf              2080 arch/powerpc/kvm/powerpc.c vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
vmf               223 arch/powerpc/platforms/cell/spufs/file.c spufs_mem_mmap_fault(struct vm_fault *vmf)
vmf               225 arch/powerpc/platforms/cell/spufs/file.c 	struct vm_area_struct *vma = vmf->vma;
vmf               230 arch/powerpc/platforms/cell/spufs/file.c 	offset = vmf->pgoff << PAGE_SHIFT;
vmf               235 arch/powerpc/platforms/cell/spufs/file.c 			vmf->address, offset);
vmf               247 arch/powerpc/platforms/cell/spufs/file.c 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
vmf               303 arch/powerpc/platforms/cell/spufs/file.c static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
vmf               307 arch/powerpc/platforms/cell/spufs/file.c 	struct spu_context *ctx = vmf->vma->vm_file->private_data;
vmf               308 arch/powerpc/platforms/cell/spufs/file.c 	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
vmf               346 arch/powerpc/platforms/cell/spufs/file.c 		ret = vmf_insert_pfn(vmf->vma, vmf->address,
vmf               360 arch/powerpc/platforms/cell/spufs/file.c static vm_fault_t spufs_cntl_mmap_fault(struct vm_fault *vmf)
vmf               362 arch/powerpc/platforms/cell/spufs/file.c 	return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
vmf              1034 arch/powerpc/platforms/cell/spufs/file.c spufs_signal1_mmap_fault(struct vm_fault *vmf)
vmf              1037 arch/powerpc/platforms/cell/spufs/file.c 	return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
vmf              1042 arch/powerpc/platforms/cell/spufs/file.c 	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
vmf              1172 arch/powerpc/platforms/cell/spufs/file.c spufs_signal2_mmap_fault(struct vm_fault *vmf)
vmf              1175 arch/powerpc/platforms/cell/spufs/file.c 	return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
vmf              1180 arch/powerpc/platforms/cell/spufs/file.c 	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
vmf              1301 arch/powerpc/platforms/cell/spufs/file.c spufs_mss_mmap_fault(struct vm_fault *vmf)
vmf              1303 arch/powerpc/platforms/cell/spufs/file.c 	return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
vmf              1363 arch/powerpc/platforms/cell/spufs/file.c spufs_psmap_mmap_fault(struct vm_fault *vmf)
vmf              1365 arch/powerpc/platforms/cell/spufs/file.c 	return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
vmf              1423 arch/powerpc/platforms/cell/spufs/file.c spufs_mfc_mmap_fault(struct vm_fault *vmf)
vmf              1425 arch/powerpc/platforms/cell/spufs/file.c 	return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
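The spufs handlers above all funnel into spufs_ps_fault() with a per-file window offset (0x4000 for cntl, 0x14000 for signal1, and so on, per the listing). A much-simplified sketch of that dispatch style; my_area_base and the window macros are hypothetical, and the real helper also acquires the SPU context first:

#include <linux/mm.h>

#define MY_CNTL_OFFSET	0x4000		/* window offset, per the listing */
#define MY_CNTL_SIZE	0x1000		/* hypothetical window size */

static phys_addr_t my_area_base;	/* hypothetical problem-state base */

static vm_fault_t my_ps_fault(struct vm_fault *vmf,
			      unsigned long ps_offs, size_t ps_size)
{
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vmf->vma, vmf->address,
			      (my_area_base + ps_offs + offset) >> PAGE_SHIFT);
}

static vm_fault_t my_cntl_fault(struct vm_fault *vmf)
{
	return my_ps_fault(vmf, MY_CNTL_OFFSET, MY_CNTL_SIZE);
}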
vmf                51 arch/s390/kernel/vdso.c 		      struct vm_area_struct *vma, struct vm_fault *vmf)
vmf                65 arch/s390/kernel/vdso.c 	if (vmf->pgoff >= vdso_pages)
vmf                68 arch/s390/kernel/vdso.c 	vmf->page = vdso_pagelist[vmf->pgoff];
vmf                69 arch/s390/kernel/vdso.c 	get_page(vmf->page);
vmf              4485 arch/s390/kvm/kvm-s390.c vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
vmf              4488 arch/s390/kvm/kvm-s390.c 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
vmf              4490 arch/s390/kvm/kvm-s390.c 		vmf->page = virt_to_page(vcpu->arch.sie_block);
vmf              4491 arch/s390/kvm/kvm-s390.c 		get_page(vmf->page);
vmf                43 arch/x86/entry/vdso/vma.c 		      struct vm_area_struct *vma, struct vm_fault *vmf)
vmf                47 arch/x86/entry/vdso/vma.c 	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
vmf                50 arch/x86/entry/vdso/vma.c 	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
vmf                51 arch/x86/entry/vdso/vma.c 	get_page(vmf->page);
vmf                88 arch/x86/entry/vdso/vma.c 		      struct vm_area_struct *vma, struct vm_fault *vmf)
vmf                96 arch/x86/entry/vdso/vma.c 	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
vmf               110 arch/x86/entry/vdso/vma.c 		return vmf_insert_pfn(vma, vmf->address,
vmf               116 arch/x86/entry/vdso/vma.c 			return vmf_insert_pfn_prot(vma, vmf->address,
vmf               124 arch/x86/entry/vdso/vma.c 			return vmf_insert_pfn(vma, vmf->address,
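The vdso hits above (s390 and x86) use the three-argument vm_special_mapping fault variant rather than vm_operations_struct; the same prototype shows up at include/linux/mm_types.h:734 further down. A sketch, with my_vdso_pages/my_vdso_pagelist standing in for the s390 variables shown:

#include <linux/mm.h>
#include <linux/mm_types.h>

static unsigned long my_vdso_pages;	/* hypothetical, filled at init */
static struct page **my_vdso_pagelist;

static vm_fault_t my_vdso_fault(const struct vm_special_mapping *sm,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	if (vmf->pgoff >= my_vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = my_vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_special_mapping my_vdso_mapping = {
	.name	= "[vdso]",
	.fault	= my_vdso_fault,
};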
vmf              4554 arch/x86/kvm/x86.c vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
vmf              5156 drivers/android/binder.c static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
vmf                14 drivers/char/agp/alpha-agp.c static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)
vmf                21 drivers/char/agp/alpha-agp.c 	dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base;
vmf                32 drivers/char/agp/alpha-agp.c 	vmf->page = page;
vmf               138 drivers/char/mspec.c mspec_fault(struct vm_fault *vmf)
vmf               142 drivers/char/mspec.c 	pgoff_t index = vmf->pgoff;
vmf               143 drivers/char/mspec.c 	struct vma_data *vdata = vmf->vma->vm_private_data;
vmf               165 drivers/char/mspec.c 	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
vmf                79 drivers/dax/device.c 				struct vm_fault *vmf, pfn_t *pfn)
vmf                86 drivers/dax/device.c 	if (check_vma(dev_dax, vmf->vma, __func__))
vmf                99 drivers/dax/device.c 	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
vmf               101 drivers/dax/device.c 		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
vmf               107 drivers/dax/device.c 	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
vmf               111 drivers/dax/device.c 				struct vm_fault *vmf, pfn_t *pfn)
vmf               113 drivers/dax/device.c 	unsigned long pmd_addr = vmf->address & PMD_MASK;
vmf               120 drivers/dax/device.c 	if (check_vma(dev_dax, vmf->vma, __func__))
vmf               142 drivers/dax/device.c 	if (pmd_addr < vmf->vma->vm_start ||
vmf               143 drivers/dax/device.c 			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
vmf               146 drivers/dax/device.c 	pgoff = linear_page_index(vmf->vma, pmd_addr);
vmf               155 drivers/dax/device.c 	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
vmf               160 drivers/dax/device.c 				struct vm_fault *vmf, pfn_t *pfn)
vmf               162 drivers/dax/device.c 	unsigned long pud_addr = vmf->address & PUD_MASK;
vmf               170 drivers/dax/device.c 	if (check_vma(dev_dax, vmf->vma, __func__))
vmf               192 drivers/dax/device.c 	if (pud_addr < vmf->vma->vm_start ||
vmf               193 drivers/dax/device.c 			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
vmf               196 drivers/dax/device.c 	pgoff = linear_page_index(vmf->vma, pud_addr);
vmf               205 drivers/dax/device.c 	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
vmf               209 drivers/dax/device.c 				struct vm_fault *vmf, pfn_t *pfn)
vmf               215 drivers/dax/device.c static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
vmf               218 drivers/dax/device.c 	struct file *filp = vmf->vma->vm_file;
vmf               226 drivers/dax/device.c 			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
vmf               227 drivers/dax/device.c 			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);
vmf               233 drivers/dax/device.c 		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
vmf               237 drivers/dax/device.c 		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
vmf               241 drivers/dax/device.c 		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
vmf               257 drivers/dax/device.c 		pgoff = linear_page_index(vmf->vma, vmf->address
vmf               274 drivers/dax/device.c static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
vmf               276 drivers/dax/device.c 	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
vmf                23 drivers/dma-buf/udmabuf.c static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
vmf                25 drivers/dma-buf/udmabuf.c 	struct vm_area_struct *vma = vmf->vma;
vmf                28 drivers/dma-buf/udmabuf.c 	vmf->page = ubuf->pages[vmf->pgoff];
vmf                29 drivers/dma-buf/udmabuf.c 	get_page(vmf->page);
vmf                18 drivers/gpu/drm/armada/armada_gem.c static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
vmf                20 drivers/gpu/drm/armada/armada_gem.c 	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
vmf                24 drivers/gpu/drm/armada/armada_gem.c 	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
vmf                25 drivers/gpu/drm/armada/armada_gem.c 	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
vmf               471 drivers/gpu/drm/drm_gem_shmem_helper.c static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
vmf               473 drivers/gpu/drm/drm_gem_shmem_helper.c 	struct vm_area_struct *vma = vmf->vma;
vmf               479 drivers/gpu/drm/drm_gem_shmem_helper.c 	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
vmf               482 drivers/gpu/drm/drm_gem_shmem_helper.c 	page = shmem->pages[vmf->pgoff];
vmf               484 drivers/gpu/drm/drm_gem_shmem_helper.c 	return vmf_insert_page(vma, vmf->address, page);
vmf               116 drivers/gpu/drm/drm_vm.c static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
vmf               118 drivers/gpu/drm/drm_vm.c 	struct vm_area_struct *vma = vmf->vma;
vmf               145 drivers/gpu/drm/drm_vm.c 		resource_size_t offset = vmf->address - vma->vm_start;
vmf               175 drivers/gpu/drm/drm_vm.c 		vmf->page = page;
vmf               189 drivers/gpu/drm/drm_vm.c static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
vmf               205 drivers/gpu/drm/drm_vm.c static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
vmf               207 drivers/gpu/drm/drm_vm.c 	struct vm_area_struct *vma = vmf->vma;
vmf               216 drivers/gpu/drm/drm_vm.c 	offset = vmf->address - vma->vm_start;
vmf               222 drivers/gpu/drm/drm_vm.c 	vmf->page = page;
vmf               307 drivers/gpu/drm/drm_vm.c static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
vmf               309 drivers/gpu/drm/drm_vm.c 	struct vm_area_struct *vma = vmf->vma;
vmf               322 drivers/gpu/drm/drm_vm.c 	offset = vmf->address - vma->vm_start;
vmf               328 drivers/gpu/drm/drm_vm.c 	vmf->page = page;
vmf               342 drivers/gpu/drm/drm_vm.c static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
vmf               344 drivers/gpu/drm/drm_vm.c 	struct vm_area_struct *vma = vmf->vma;
vmf               359 drivers/gpu/drm/drm_vm.c 	offset = vmf->address - vma->vm_start;
vmf               364 drivers/gpu/drm/drm_vm.c 	vmf->page = page;
vmf                51 drivers/gpu/drm/etnaviv/etnaviv_drv.h vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
vmf               173 drivers/gpu/drm/etnaviv/etnaviv_gem.c vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
vmf               175 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	struct vm_area_struct *vma = vmf->vma;
vmf               200 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vmf               204 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
vmf               207 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	return vmf_insert_page(vma, vmf->address, page);
vmf               384 drivers/gpu/drm/exynos/exynos_drm_gem.c vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
vmf               386 drivers/gpu/drm/exynos/exynos_drm_gem.c 	struct vm_area_struct *vma = vmf->vma;
vmf               392 drivers/gpu/drm/exynos/exynos_drm_gem.c 	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vmf               400 drivers/gpu/drm/exynos/exynos_drm_gem.c 	return vmf_insert_mixed(vma, vmf->address,
vmf               105 drivers/gpu/drm/exynos/exynos_drm_gem.h vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf);
vmf                99 drivers/gpu/drm/gma500/framebuffer.c static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
vmf               101 drivers/gpu/drm/gma500/framebuffer.c 	struct vm_area_struct *vma = vmf->vma;
vmf               115 drivers/gpu/drm/gma500/framebuffer.c 	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
vmf               126 drivers/gpu/drm/gma500/gem.c vm_fault_t psb_gem_fault(struct vm_fault *vmf)
vmf               128 drivers/gpu/drm/gma500/gem.c 	struct vm_area_struct *vma = vmf->vma;
vmf               162 drivers/gpu/drm/gma500/gem.c 	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vmf               169 drivers/gpu/drm/gma500/gem.c 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
vmf               741 drivers/gpu/drm/gma500/psb_drv.h extern vm_fault_t psb_gem_fault(struct vm_fault *vmf);
vmf               217 drivers/gpu/drm/i915/gem/i915_gem_mman.c vm_fault_t i915_gem_fault(struct vm_fault *vmf)
vmf               220 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	struct vm_area_struct *area = vmf->vma;
vmf               238 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
vmf              2346 drivers/gpu/drm/i915/i915_drv.h vm_fault_t i915_gem_fault(struct vm_fault *vmf);
vmf                91 drivers/gpu/drm/lima/lima_gem.c static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
vmf                93 drivers/gpu/drm/lima/lima_gem.c 	struct vm_area_struct *vma = vmf->vma;
vmf               100 drivers/gpu/drm/lima/lima_gem.c 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vmf               103 drivers/gpu/drm/lima/lima_gem.c 	return vmf_insert_mixed(vma, vmf->address, pfn);
vmf               272 drivers/gpu/drm/msm/msm_drv.h vm_fault_t msm_gem_fault(struct vm_fault *vmf);
vmf               250 drivers/gpu/drm/msm/msm_gem.c vm_fault_t msm_gem_fault(struct vm_fault *vmf)
vmf               252 drivers/gpu/drm/msm/msm_gem.c 	struct vm_area_struct *vma = vmf->vma;
vmf               284 drivers/gpu/drm/msm/msm_gem.c 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vmf               288 drivers/gpu/drm/msm/msm_gem.c 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
vmf               291 drivers/gpu/drm/msm/msm_gem.c 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
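armada, exynos, lima, msm and omapdrm above compute the page index from vmf->address rather than vmf->pgoff, then insert the PFN; msm additionally wraps it in a pfn_t via __pfn_to_pfn_t(). A sketch assuming a hypothetical my_obj whose pages[] array was pinned at mmap time:

#include <linux/mm.h>
#include <linux/pfn_t.h>

struct my_obj {
	struct page **pages;		/* hypothetical, pinned at mmap time */
};

static vm_fault_t my_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct my_obj *obj = vma->vm_private_data;
	pgoff_t pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	unsigned long pfn = page_to_pfn(obj->pages[pgoff]);

	return vmf_insert_mixed(vma, vmf->address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
}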
vmf               132 drivers/gpu/drm/nouveau/nouveau_dmem.c 		struct vm_fault *vmf, struct migrate_vma *args,
vmf               142 drivers/gpu/drm/nouveau/nouveau_dmem.c 	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
vmf               165 drivers/gpu/drm/nouveau/nouveau_dmem.c static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
vmf               167 drivers/gpu/drm/nouveau/nouveau_dmem.c 	struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
vmf               174 drivers/gpu/drm/nouveau/nouveau_dmem.c 		.vma		= vmf->vma,
vmf               175 drivers/gpu/drm/nouveau/nouveau_dmem.c 		.start		= vmf->address,
vmf               176 drivers/gpu/drm/nouveau/nouveau_dmem.c 		.end		= vmf->address + PAGE_SIZE,
vmf               191 drivers/gpu/drm/nouveau/nouveau_dmem.c 	ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
vmf               347 drivers/gpu/drm/omapdrm/omap_gem.c 		struct vm_area_struct *vma, struct vm_fault *vmf)
vmf               354 drivers/gpu/drm/omapdrm/omap_gem.c 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vmf               364 drivers/gpu/drm/omapdrm/omap_gem.c 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
vmf               367 drivers/gpu/drm/omapdrm/omap_gem.c 	return vmf_insert_mixed(vma, vmf->address,
vmf               373 drivers/gpu/drm/omapdrm/omap_gem.c 		struct vm_area_struct *vma, struct vm_fault *vmf)
vmf               403 drivers/gpu/drm/omapdrm/omap_gem.c 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vmf               414 drivers/gpu/drm/omapdrm/omap_gem.c 	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
vmf               459 drivers/gpu/drm/omapdrm/omap_gem.c 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
vmf               490 drivers/gpu/drm/omapdrm/omap_gem.c vm_fault_t omap_gem_fault(struct vm_fault *vmf)
vmf               492 drivers/gpu/drm/omapdrm/omap_gem.c 	struct vm_area_struct *vma = vmf->vma;
vmf               517 drivers/gpu/drm/omapdrm/omap_gem.c 		ret = omap_gem_fault_2d(obj, vma, vmf);
vmf               519 drivers/gpu/drm/omapdrm/omap_gem.c 		ret = omap_gem_fault_1d(obj, vma, vmf);
vmf                72 drivers/gpu/drm/omapdrm/omap_gem.h vm_fault_t omap_gem_fault(struct vm_fault *vmf);
vmf                54 drivers/gpu/drm/qxl/qxl_ttm.c static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
vmf                59 drivers/gpu/drm/qxl/qxl_ttm.c 	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
vmf                62 drivers/gpu/drm/qxl/qxl_ttm.c 	ret = ttm_vm_ops->fault(vmf);
vmf               886 drivers/gpu/drm/radeon/radeon_ttm.c static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
vmf               892 drivers/gpu/drm/radeon/radeon_ttm.c 	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
vmf               898 drivers/gpu/drm/radeon/radeon_ttm.c 	ret = ttm_vm_ops->fault(vmf);
vmf               425 drivers/gpu/drm/tegra/gem.c static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
vmf               427 drivers/gpu/drm/tegra/gem.c 	struct vm_area_struct *vma = vmf->vma;
vmf               436 drivers/gpu/drm/tegra/gem.c 	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vmf               439 drivers/gpu/drm/tegra/gem.c 	return vmf_insert_page(vma, vmf->address, page);
vmf                48 drivers/gpu/drm/ttm/ttm_bo_vm.c 				struct vm_fault *vmf)
vmf                66 drivers/gpu/drm/ttm/ttm_bo_vm.c 	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
vmf                68 drivers/gpu/drm/ttm/ttm_bo_vm.c 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
vmf                72 drivers/gpu/drm/ttm/ttm_bo_vm.c 		up_read(&vmf->vma->vm_mm->mmap_sem);
vmf               109 drivers/gpu/drm/ttm/ttm_bo_vm.c static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
vmf               111 drivers/gpu/drm/ttm/ttm_bo_vm.c 	struct vm_area_struct *vma = vmf->vma;
vmf               123 drivers/gpu/drm/ttm/ttm_bo_vm.c 	unsigned long address = vmf->address;
vmf               135 drivers/gpu/drm/ttm/ttm_bo_vm.c 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
vmf               136 drivers/gpu/drm/ttm/ttm_bo_vm.c 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
vmf               138 drivers/gpu/drm/ttm/ttm_bo_vm.c 				up_read(&vmf->vma->vm_mm->mmap_sem);
vmf               191 drivers/gpu/drm/ttm/ttm_bo_vm.c 	ret = ttm_bo_vm_fault_idle(bo, vmf);
vmf               194 drivers/gpu/drm/ttm/ttm_bo_vm.c 		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
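ttm_bo_vm.c above demonstrates the FAULT_FLAG_ALLOW_RETRY protocol: rather than sleeping with mmap_sem held, the handler drops the lock and returns VM_FAULT_RETRY so the core MM can retry the fault; with FAULT_FLAG_RETRY_NOWAIT it must return immediately without dropping the lock. A sketch of that dance, using the pre-5.8 mmap_sem naming seen in the listing (the real code also pins the buffer object before dropping the lock):

#include <linux/completion.h>
#include <linux/mm.h>

static vm_fault_t my_wait_idle(struct vm_fault *vmf,
			       struct completion *idle)
{
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;	/* caller forbids sleeping */

		/* pin the object 'idle' lives in before dropping the lock! */
		up_read(&vmf->vma->vm_mm->mmap_sem);
		wait_for_completion(idle);
		return VM_FAULT_RETRY;	/* MM re-takes mmap_sem and refaults */
	}

	/* retries not allowed: block with mmap_sem held */
	wait_for_completion(idle);
	return 0;
}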
vmf               144 drivers/gpu/drm/udl/udl_drv.h vm_fault_t udl_gem_fault(struct vm_fault *vmf);
vmf               103 drivers/gpu/drm/udl/udl_gem.c vm_fault_t udl_gem_fault(struct vm_fault *vmf)
vmf               105 drivers/gpu/drm/udl/udl_gem.c 	struct vm_area_struct *vma = vmf->vma;
vmf               110 drivers/gpu/drm/udl/udl_gem.c 	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vmf               116 drivers/gpu/drm/udl/udl_gem.c 	return vmf_insert_page(vma, vmf->address, page);
vmf               687 drivers/gpu/drm/vc4/vc4_bo.c vm_fault_t vc4_fault(struct vm_fault *vmf)
vmf               689 drivers/gpu/drm/vc4/vc4_bo.c 	struct vm_area_struct *vma = vmf->vma;
vmf               730 drivers/gpu/drm/vc4/vc4_drv.h vm_fault_t vc4_fault(struct vm_fault *vmf);
vmf                71 drivers/gpu/drm/vgem/vgem_drv.c static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
vmf                73 drivers/gpu/drm/vgem/vgem_drv.c 	struct vm_area_struct *vma = vmf->vma;
vmf                76 drivers/gpu/drm/vgem/vgem_drv.c 	unsigned long vaddr = vmf->address;
vmf                90 drivers/gpu/drm/vgem/vgem_drv.c 		vmf->page = obj->pages[page_offset];
vmf               101 drivers/gpu/drm/vgem/vgem_drv.c 			vmf->page = page;
vmf               124 drivers/gpu/drm/vkms/vkms_drv.h vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
vmf                43 drivers/gpu/drm/vkms/vkms_gem.c vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
vmf                45 drivers/gpu/drm/vkms/vkms_gem.c 	struct vm_area_struct *vma = vmf->vma;
vmf                47 drivers/gpu/drm/vkms/vkms_gem.c 	unsigned long vaddr = vmf->address;
vmf                61 drivers/gpu/drm/vkms/vkms_gem.c 		vmf->page = obj->pages[page_offset];
vmf                73 drivers/gpu/drm/vkms/vkms_gem.c 			vmf->page = page;
vmf              1088 drivers/hsi/clients/cmt_speech.c static vm_fault_t cs_char_vma_fault(struct vm_fault *vmf)
vmf              1090 drivers/hsi/clients/cmt_speech.c 	struct cs_char *csdata = vmf->vma->vm_private_data;
vmf              1095 drivers/hsi/clients/cmt_speech.c 	vmf->page = page;
vmf              1580 drivers/hwtracing/intel_th/msu.c static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
vmf              1582 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter = vmf->vma->vm_file->private_data;
vmf              1585 drivers/hwtracing/intel_th/msu.c 	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
vmf              1586 drivers/hwtracing/intel_th/msu.c 	if (!vmf->page)
vmf              1589 drivers/hwtracing/intel_th/msu.c 	get_page(vmf->page);
vmf              1590 drivers/hwtracing/intel_th/msu.c 	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
vmf              1591 drivers/hwtracing/intel_th/msu.c 	vmf->page->index = vmf->pgoff;
vmf               890 drivers/infiniband/core/uverbs_main.c static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
vmf               892 drivers/infiniband/core/uverbs_main.c 	struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
vmf               893 drivers/infiniband/core/uverbs_main.c 	struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
vmf               900 drivers/infiniband/core/uverbs_main.c 	if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
vmf               901 drivers/infiniband/core/uverbs_main.c 		vmf->page = ZERO_PAGE(vmf->address);
vmf               902 drivers/infiniband/core/uverbs_main.c 		get_page(vmf->page);
vmf               909 drivers/infiniband/core/uverbs_main.c 			alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
vmf               916 drivers/infiniband/core/uverbs_main.c 		vmf->page = ufile->disassociate_page;
vmf               917 drivers/infiniband/core/uverbs_main.c 		get_page(vmf->page);
vmf               113 drivers/infiniband/hw/hfi1/file_ops.c static vm_fault_t vma_fault(struct vm_fault *vmf);
vmf               359 drivers/infiniband/hw/hfi1/file_ops.c 	u8 subctxt, mapio = 0, vmf = 0, type;
vmf               499 drivers/infiniband/hw/hfi1/file_ops.c 		vmf = 1;
vmf               531 drivers/infiniband/hw/hfi1/file_ops.c 		vmf = 1;
vmf               537 drivers/infiniband/hw/hfi1/file_ops.c 		vmf = 1;
vmf               544 drivers/infiniband/hw/hfi1/file_ops.c 		vmf = 1;
vmf               556 drivers/infiniband/hw/hfi1/file_ops.c 		vmf = 1;
vmf               575 drivers/infiniband/hw/hfi1/file_ops.c 		    ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
vmf               577 drivers/infiniband/hw/hfi1/file_ops.c 	if (vmf) {
vmf               605 drivers/infiniband/hw/hfi1/file_ops.c static vm_fault_t vma_fault(struct vm_fault *vmf)
vmf               609 drivers/infiniband/hw/hfi1/file_ops.c 	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
vmf               614 drivers/infiniband/hw/hfi1/file_ops.c 	vmf->page = page;
vmf               875 drivers/infiniband/hw/qib/qib_file_ops.c static vm_fault_t qib_file_vma_fault(struct vm_fault *vmf)
vmf               879 drivers/infiniband/hw/qib/qib_file_ops.c 	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
vmf               884 drivers/infiniband/hw/qib/qib_file_ops.c 	vmf->page = page;
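hfi1 and qib above use one more trick: the mmap offset is chosen at mmap time to encode a kernel virtual address, so the fault handler recovers the backing page directly with vmalloc_to_page(). A sketch:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static vm_fault_t my_vmalloc_fault(struct vm_fault *vmf)
{
	/* pgoff encodes a kernel virtual address, set up at mmap time */
	struct page *page =
		vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));

	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;
	return 0;
}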
vmf               438 drivers/media/v4l2-core/videobuf-dma-sg.c static vm_fault_t videobuf_vm_fault(struct vm_fault *vmf)
vmf               440 drivers/media/v4l2-core/videobuf-dma-sg.c 	struct vm_area_struct *vma = vmf->vma;
vmf               444 drivers/media/v4l2-core/videobuf-dma-sg.c 		vmf->address, vma->vm_start, vma->vm_end);
vmf               449 drivers/media/v4l2-core/videobuf-dma-sg.c 	clear_user_highpage(page, vmf->address);
vmf               450 drivers/media/v4l2-core/videobuf-dma-sg.c 	vmf->page = page;
vmf               126 drivers/misc/cxl/context.c static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf)
vmf               128 drivers/misc/cxl/context.c 	struct vm_area_struct *vma = vmf->vma;
vmf               133 drivers/misc/cxl/context.c 	offset = vmf->pgoff << PAGE_SHIFT;
vmf               136 drivers/misc/cxl/context.c 			__func__, ctx->pe, vmf->address, offset);
vmf               161 drivers/misc/cxl/context.c 			vmf->page = ctx->ff_page;
vmf               168 drivers/misc/cxl/context.c 	ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
vmf               137 drivers/misc/ocxl/context.c static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
vmf               139 drivers/misc/ocxl/context.c 	struct vm_area_struct *vma = vmf->vma;
vmf               144 drivers/misc/ocxl/context.c 	offset = vmf->pgoff << PAGE_SHIFT;
vmf               146 drivers/misc/ocxl/context.c 		ctx->pasid, vmf->address, offset);
vmf               149 drivers/misc/ocxl/context.c 		ret = map_pp_mmio(vma, vmf->address, offset, ctx);
vmf               151 drivers/misc/ocxl/context.c 		ret = map_afu_irq(vma, vmf->address, offset, ctx);
vmf                74 drivers/misc/ocxl/sysfs.c static vm_fault_t global_mmio_fault(struct vm_fault *vmf)
vmf                76 drivers/misc/ocxl/sysfs.c 	struct vm_area_struct *vma = vmf->vma;
vmf                80 drivers/misc/ocxl/sysfs.c 	if (vmf->pgoff >= (afu->config.global_mmio_size >> PAGE_SHIFT))
vmf                83 drivers/misc/ocxl/sysfs.c 	offset = vmf->pgoff;
vmf                85 drivers/misc/ocxl/sysfs.c 	return vmf_insert_pfn(vma, vmf->address, offset);
vmf               916 drivers/misc/sgi-gru/grumain.c vm_fault_t gru_fault(struct vm_fault *vmf)
vmf               918 drivers/misc/sgi-gru/grumain.c 	struct vm_area_struct *vma = vmf->vma;
vmf               923 drivers/misc/sgi-gru/grumain.c 	vaddr = vmf->address;
vmf               654 drivers/misc/sgi-gru/grutables.h extern vm_fault_t gru_fault(struct vm_fault *vmf);
vmf              1125 drivers/scsi/cxlflash/ocxl_hw.c static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
vmf              1127 drivers/scsi/cxlflash/ocxl_hw.c 	struct vm_area_struct *vma = vmf->vma;
vmf              1132 drivers/scsi/cxlflash/ocxl_hw.c 	offset = vmf->pgoff << PAGE_SHIFT;
vmf              1148 drivers/scsi/cxlflash/ocxl_hw.c 	return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
vmf              1103 drivers/scsi/cxlflash/superpipe.c static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
vmf              1105 drivers/scsi/cxlflash/superpipe.c 	struct vm_area_struct *vma = vmf->vma;
vmf              1134 drivers/scsi/cxlflash/superpipe.c 		rc = ctxi->cxl_mmap_vmops->fault(vmf);
vmf              1147 drivers/scsi/cxlflash/superpipe.c 		vmf->page = err_page;
vmf              1223 drivers/scsi/sg.c sg_vma_fault(struct vm_fault *vmf)
vmf              1225 drivers/scsi/sg.c 	struct vm_area_struct *vma = vmf->vma;
vmf              1234 drivers/scsi/sg.c 	offset = vmf->pgoff << PAGE_SHIFT;
vmf              1249 drivers/scsi/sg.c 			vmf->page = page;
vmf              1526 drivers/target/target_core_user.c static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
vmf              1528 drivers/target/target_core_user.c 	struct tcmu_dev *udev = vmf->vma->vm_private_data;
vmf              1534 drivers/target/target_core_user.c 	int mi = tcmu_find_mem_index(vmf->vma);
vmf              1542 drivers/target/target_core_user.c 	offset = (vmf->pgoff - mi) << PAGE_SHIFT;
vmf              1559 drivers/target/target_core_user.c 	vmf->page = page;
vmf               667 drivers/uio/uio.c static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
vmf               669 drivers/uio/uio.c 	struct uio_device *idev = vmf->vma->vm_private_data;
vmf               682 drivers/uio/uio.c 	mi = uio_find_mem_index(vmf->vma);
vmf               692 drivers/uio/uio.c 	offset = (vmf->pgoff - mi) << PAGE_SHIFT;
vmf               700 drivers/uio/uio.c 	vmf->page = page;
vmf              1245 drivers/usb/mon/mon_bin.c static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
vmf              1247 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = vmf->vma->vm_private_data;
vmf              1251 drivers/usb/mon/mon_bin.c 	offset = vmf->pgoff << PAGE_SHIFT;
vmf              1257 drivers/usb/mon/mon_bin.c 	vmf->page = pageptr;
vmf               116 drivers/vfio/pci/vfio_pci_nvlink2.c static vm_fault_t vfio_pci_nvgpu_mmap_fault(struct vm_fault *vmf)
vmf               119 drivers/vfio/pci/vfio_pci_nvlink2.c 	struct vm_area_struct *vma = vmf->vma;
vmf               122 drivers/vfio/pci/vfio_pci_nvlink2.c 	unsigned long vmf_off = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vmf               128 drivers/vfio/pci/vfio_pci_nvlink2.c 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
vmf               130 drivers/vfio/pci/vfio_pci_nvlink2.c 			vmf->address, ret);
vmf                40 drivers/video/fbdev/core/fb_defio.c static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
vmf                44 drivers/video/fbdev/core/fb_defio.c 	struct fb_info *info = vmf->vma->vm_private_data;
vmf                46 drivers/video/fbdev/core/fb_defio.c 	offset = vmf->pgoff << PAGE_SHIFT;
vmf                56 drivers/video/fbdev/core/fb_defio.c 	if (vmf->vma->vm_file)
vmf                57 drivers/video/fbdev/core/fb_defio.c 		page->mapping = vmf->vma->vm_file->f_mapping;
vmf                62 drivers/video/fbdev/core/fb_defio.c 	page->index = vmf->pgoff;
vmf                64 drivers/video/fbdev/core/fb_defio.c 	vmf->page = page;
vmf                93 drivers/video/fbdev/core/fb_defio.c static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
vmf                95 drivers/video/fbdev/core/fb_defio.c 	struct page *page = vmf->page;
vmf                96 drivers/video/fbdev/core/fb_defio.c 	struct fb_info *info = vmf->vma->vm_private_data;
vmf               106 drivers/video/fbdev/core/fb_defio.c 	file_update_time(vmf->vma->vm_file);
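fb_defio above and the filesystem handlers below (9p, afs, btrfs, ext4, f2fs, fuse, gfs2, ...) implement page_mkwrite, the write-notification hook from the vm_operations_struct entries further down. A minimal sketch of its usual shape, close to mm/filemap.c's filemap_page_mkwrite() plus the freeze protection the filesystems add:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);	/* block fs freeze while dirtying */
	file_update_time(vmf->vma->vm_file);

	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;		/* truncated under us: refault */
		goto out;
	}
	set_page_dirty(page);
	wait_for_stable_page(page);		/* don't modify under writeback */
	ret = VM_FAULT_LOCKED;			/* page is returned locked */
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}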
vmf               117 drivers/xen/privcmd-buf.c static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
vmf               120 drivers/xen/privcmd-buf.c 		 vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
vmf               121 drivers/xen/privcmd-buf.c 		 vmf->pgoff, (void *)vmf->address);
vmf               912 drivers/xen/privcmd.c static vm_fault_t privcmd_fault(struct vm_fault *vmf)
vmf               915 drivers/xen/privcmd.c 	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
vmf               916 drivers/xen/privcmd.c 	       vmf->pgoff, (void *)vmf->address);
vmf               543 fs/9p/vfs_file.c v9fs_vm_page_mkwrite(struct vm_fault *vmf)
vmf               546 fs/9p/vfs_file.c 	struct page *page = vmf->page;
vmf               547 fs/9p/vfs_file.c 	struct file *filp = vmf->vma->vm_file;
vmf              1348 fs/afs/internal.h extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
vmf               776 fs/afs/write.c vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
vmf               778 fs/afs/write.c 	struct file *file = vmf->vma->vm_file;
vmf               784 fs/afs/write.c 	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);
vmf               792 fs/afs/write.c 	fscache_wait_on_page_write(vnode->cache, vmf->page);
vmf               795 fs/afs/write.c 	if (PageWriteback(vmf->page) &&
vmf               796 fs/afs/write.c 	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
vmf               799 fs/afs/write.c 	if (lock_page_killable(vmf->page) < 0)
vmf               806 fs/afs/write.c 	wait_on_page_writeback(vmf->page);
vmf               811 fs/afs/write.c 			     vmf->page->index, priv);
vmf               812 fs/afs/write.c 	SetPagePrivate(vmf->page);
vmf               813 fs/afs/write.c 	set_page_private(vmf->page, priv);
vmf              2860 fs/btrfs/ctree.h vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
vmf              9046 fs/btrfs/inode.c vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
vmf              9048 fs/btrfs/inode.c 	struct page *page = vmf->page;
vmf              9049 fs/btrfs/inode.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf              9084 fs/btrfs/inode.c 		ret2 = file_update_time(vmf->vma->vm_file);
vmf              2467 fs/buffer.c    int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
vmf              2470 fs/buffer.c    	struct page *page = vmf->page;
vmf              1423 fs/ceph/addr.c static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
vmf              1425 fs/ceph/addr.c 	struct vm_area_struct *vma = vmf->vma;
vmf              1430 fs/ceph/addr.c 	loff_t off = vmf->pgoff << PAGE_SHIFT;
vmf              1457 fs/ceph/addr.c 		ret = filemap_fault(vmf);
vmf              1498 fs/ceph/addr.c 		vmf->page = page;
vmf              1515 fs/ceph/addr.c static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
vmf              1517 fs/ceph/addr.c 	struct vm_area_struct *vma = vmf->vma;
vmf              1522 fs/ceph/addr.c 	struct page *page = vmf->page;
vmf              4040 fs/cifs/file.c cifs_page_mkwrite(struct vm_fault *vmf)
vmf              4042 fs/cifs/file.c 	struct page *page = vmf->page;
vmf               423 fs/cramfs/inode.c 			vm_fault_t vmf;
vmf               426 fs/cramfs/inode.c 			vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
vmf               427 fs/cramfs/inode.c 			if (vmf & VM_FAULT_ERROR)
vmf               428 fs/cramfs/inode.c 				ret = vm_fault_to_errno(vmf, 0);
vmf               717 fs/dax.c       		struct address_space *mapping, struct vm_fault *vmf,
vmf               741 fs/dax.c       		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
vmf              1032 fs/dax.c       		struct vm_fault *vmf)
vmf              1035 fs/dax.c       	unsigned long vaddr = vmf->address;
vmf              1039 fs/dax.c       	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
vmf              1042 fs/dax.c       	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
vmf              1043 fs/dax.c       	trace_dax_load_hole(inode, vmf, ret);
vmf              1245 fs/dax.c       static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
vmf              1248 fs/dax.c       	struct vm_area_struct *vma = vmf->vma;
vmf              1250 fs/dax.c       	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
vmf              1252 fs/dax.c       	unsigned long vaddr = vmf->address;
vmf              1253 fs/dax.c       	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
vmf              1257 fs/dax.c       	bool write = vmf->flags & FAULT_FLAG_WRITE;
vmf              1263 fs/dax.c       	trace_dax_pte_fault(inode, vmf, ret);
vmf              1274 fs/dax.c       	if (write && !vmf->cow_page)
vmf              1289 fs/dax.c       	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
vmf              1311 fs/dax.c       	if (vmf->cow_page) {
vmf              1317 fs/dax.c       			clear_user_highpage(vmf->cow_page, vaddr);
vmf              1321 fs/dax.c       					sector, PAGE_SIZE, vmf->cow_page, vaddr);
vmf              1332 fs/dax.c       		__SetPageUptodate(vmf->cow_page);
vmf              1333 fs/dax.c       		ret = finish_fault(vmf);
vmf              1352 fs/dax.c       		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
vmf              1370 fs/dax.c       		trace_dax_insert_mapping(inode, vmf, entry);
vmf              1380 fs/dax.c       			ret = dax_load_hole(&xas, mapping, &entry, vmf);
vmf              1409 fs/dax.c       	trace_dax_pte_fault_done(inode, vmf, ret);
vmf              1414 fs/dax.c       static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
vmf              1417 fs/dax.c       	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
vmf              1418 fs/dax.c       	unsigned long pmd_addr = vmf->address & PMD_MASK;
vmf              1419 fs/dax.c       	struct vm_area_struct *vma = vmf->vma;
vmf              1427 fs/dax.c       	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
vmf              1433 fs/dax.c       	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
vmf              1442 fs/dax.c       	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
vmf              1443 fs/dax.c       	if (!pmd_none(*(vmf->pmd))) {
vmf              1449 fs/dax.c       		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
vmf              1452 fs/dax.c       	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
vmf              1454 fs/dax.c       	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
vmf              1456 fs/dax.c       	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
vmf              1462 fs/dax.c       	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
vmf              1466 fs/dax.c       static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
vmf              1469 fs/dax.c       	struct vm_area_struct *vma = vmf->vma;
vmf              1471 fs/dax.c       	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
vmf              1472 fs/dax.c       	unsigned long pmd_addr = vmf->address & PMD_MASK;
vmf              1473 fs/dax.c       	bool write = vmf->flags & FAULT_FLAG_WRITE;
vmf              1492 fs/dax.c       	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
vmf              1500 fs/dax.c       	if ((vmf->pgoff & PG_PMD_COLOUR) !=
vmf              1501 fs/dax.c       	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
vmf              1541 fs/dax.c       	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
vmf              1542 fs/dax.c       			!pmd_devmap(*vmf->pmd)) {
vmf              1568 fs/dax.c       		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
vmf              1585 fs/dax.c       		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
vmf              1586 fs/dax.c       		result = vmf_insert_pfn_pmd(vmf, pfn, write);
vmf              1592 fs/dax.c       		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
vmf              1618 fs/dax.c       		split_huge_pmd(vma, vmf->pmd, vmf->address);
vmf              1622 fs/dax.c       	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
vmf              1626 fs/dax.c       static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
vmf              1646 fs/dax.c       vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
vmf              1651 fs/dax.c       		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
vmf              1653 fs/dax.c       		return dax_iomap_pmd_fault(vmf, pfnp, ops);
vmf              1670 fs/dax.c       dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
vmf              1672 fs/dax.c       	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
vmf              1673 fs/dax.c       	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
vmf              1684 fs/dax.c       		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
vmf              1692 fs/dax.c       		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
vmf              1695 fs/dax.c       		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
vmf              1700 fs/dax.c       	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
vmf              1714 fs/dax.c       vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
vmf              1718 fs/dax.c       	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
vmf              1722 fs/dax.c       	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
vmf              1725 fs/dax.c       	return dax_insert_pfn_mkwrite(vmf, pfn, order);
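fs/dax.c above exports dax_iomap_fault() and dax_finish_sync_fault(); the ext2, ext4 and xfs hits below show the caller side. A condensed sketch of that wiring, with a hypothetical my_iomap_ops:

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>

static const struct iomap_ops my_iomap_ops;	/* hypothetical */

static vm_fault_t my_dax_huge_fault(struct vm_fault *vmf,
				    enum page_entry_size pe_size)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		     (vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;
	vm_fault_t ret;

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &my_iomap_ops);
	if (ret & VM_FAULT_NEEDDSYNC)
		/* MAP_SYNC mapping: persist metadata, then map the pfn */
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);

	if (write)
		sb_end_pagefault(inode->i_sb);
	return ret;
}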
vmf                91 fs/ext2/file.c static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
vmf                93 fs/ext2/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf                97 fs/ext2/file.c 	if (vmf->flags & FAULT_FLAG_WRITE) {
vmf                99 fs/ext2/file.c 		file_update_time(vmf->vma->vm_file);
vmf               103 fs/ext2/file.c 	ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
vmf               106 fs/ext2/file.c 	if (vmf->flags & FAULT_FLAG_WRITE)
vmf              2638 fs/ext4/ext4.h extern vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf);
vmf              2639 fs/ext4/ext4.h extern vm_fault_t ext4_filemap_fault(struct vm_fault *vmf);
vmf               291 fs/ext4/file.c static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
vmf               298 fs/ext4/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf               312 fs/ext4/file.c 	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
vmf               313 fs/ext4/file.c 		(vmf->vma->vm_flags & VM_SHARED);
vmf               318 fs/ext4/file.c 		file_update_time(vmf->vma->vm_file);
vmf               331 fs/ext4/file.c 	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
vmf               340 fs/ext4/file.c 			result = dax_finish_sync_fault(vmf, pe_size, pfn);
vmf               350 fs/ext4/file.c static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
vmf               352 fs/ext4/file.c 	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
vmf              6237 fs/ext4/inode.c vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
vmf              6239 fs/ext4/inode.c 	struct vm_area_struct *vma = vmf->vma;
vmf              6240 fs/ext4/inode.c 	struct page *page = vmf->page;
vmf              6269 fs/ext4/inode.c 			err = block_page_mkwrite(vma, vmf,
vmf              6316 fs/ext4/inode.c 	err = block_page_mkwrite(vma, vmf, get_block);
vmf              6338 fs/ext4/inode.c vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
vmf              6340 fs/ext4/inode.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf              6344 fs/ext4/inode.c 	ret = filemap_fault(vmf);
vmf                34 fs/f2fs/file.c static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
vmf                36 fs/f2fs/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf                40 fs/f2fs/file.c 	ret = filemap_fault(vmf);
vmf                43 fs/f2fs/file.c 	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
vmf                48 fs/f2fs/file.c static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
vmf                50 fs/f2fs/file.c 	struct page *page = vmf->page;
vmf                51 fs/f2fs/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf                73 fs/f2fs/file.c 	file_update_time(vmf->vma->vm_file);
vmf              2276 fs/fuse/file.c static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
vmf              2278 fs/fuse/file.c 	struct page *page = vmf->page;
vmf              2279 fs/fuse/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf              2281 fs/fuse/file.c 	file_update_time(vmf->vma->vm_file);
vmf               445 fs/gfs2/file.c static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
vmf               447 fs/gfs2/file.c 	struct page *page = vmf->page;
vmf               448 fs/gfs2/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf               465 fs/gfs2/file.c 	gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE);
vmf               473 fs/gfs2/file.c 	file_update_time(vmf->vma->vm_file);
vmf              1033 fs/iomap/buffered-io.c vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
vmf              1035 fs/iomap/buffered-io.c 	struct page *page = vmf->page;
vmf              1036 fs/iomap/buffered-io.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf               350 fs/kernfs/file.c static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
vmf               352 fs/kernfs/file.c 	struct file *file = vmf->vma->vm_file;
vmf               364 fs/kernfs/file.c 		ret = of->vm_ops->fault(vmf);
vmf               370 fs/kernfs/file.c static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
vmf               372 fs/kernfs/file.c 	struct file *file = vmf->vma->vm_file;
vmf               384 fs/kernfs/file.c 		ret = of->vm_ops->page_mkwrite(vmf);
vmf               532 fs/nfs/file.c  static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
vmf               534 fs/nfs/file.c  	struct page *page = vmf->page;
vmf               535 fs/nfs/file.c  	struct file *filp = vmf->vma->vm_file;
vmf                45 fs/nilfs2/file.c static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
vmf                47 fs/nilfs2/file.c 	struct vm_area_struct *vma = vmf->vma;
vmf                48 fs/nilfs2/file.c 	struct page *page = vmf->page;
vmf                99 fs/nilfs2/file.c 	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
vmf                33 fs/ocfs2/mmap.c static vm_fault_t ocfs2_fault(struct vm_fault *vmf)
vmf                35 fs/ocfs2/mmap.c 	struct vm_area_struct *vma = vmf->vma;
vmf                40 fs/ocfs2/mmap.c 	ret = filemap_fault(vmf);
vmf                44 fs/ocfs2/mmap.c 			  vma, vmf->page, vmf->pgoff);
vmf               115 fs/ocfs2/mmap.c static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
vmf               117 fs/ocfs2/mmap.c 	struct page *page = vmf->page;
vmf               118 fs/ocfs2/mmap.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf               146 fs/ocfs2/mmap.c 	ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);
vmf               436 fs/orangefs/file.c static vm_fault_t orangefs_fault(struct vm_fault *vmf)
vmf               438 fs/orangefs/file.c 	struct file *file = vmf->vma->vm_file;
vmf               449 fs/orangefs/file.c 	return filemap_fault(vmf);
vmf               719 fs/orangefs/inode.c vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
vmf               721 fs/orangefs/inode.c 	struct page *page = vmf->page;
vmf               722 fs/orangefs/inode.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf               775 fs/orangefs/inode.c 	file_update_time(vmf->vma->vm_file);
vmf               412 fs/proc/vmcore.c static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
vmf               415 fs/proc/vmcore.c 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
vmf               416 fs/proc/vmcore.c 	pgoff_t index = vmf->pgoff;
vmf               437 fs/proc/vmcore.c 	vmf->page = page;
vmf              1507 fs/ubifs/file.c static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
vmf              1509 fs/ubifs/file.c 	struct page *page = vmf->page;
vmf              1510 fs/ubifs/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf               352 fs/userfaultfd.c vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
vmf               354 fs/userfaultfd.c 	struct mm_struct *mm = vmf->vma->vm_mm;
vmf               381 fs/userfaultfd.c 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
vmf               429 fs/userfaultfd.c 	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
vmf               435 fs/userfaultfd.c 		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
vmf               440 fs/userfaultfd.c 			       vmf->flags);
vmf               452 fs/userfaultfd.c 	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
vmf               460 fs/userfaultfd.c 	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason,
vmf               466 fs/userfaultfd.c 		(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
vmf               485 fs/userfaultfd.c 	if (!is_vm_hugetlb_page(vmf->vma))
vmf               486 fs/userfaultfd.c 		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
vmf               489 fs/userfaultfd.c 		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
vmf               490 fs/userfaultfd.c 						       vmf->address,
vmf               491 fs/userfaultfd.c 						       vmf->flags, reason);
vmf              1140 fs/xfs/xfs_file.c 	struct vm_fault		*vmf,
vmf              1144 fs/xfs/xfs_file.c 	struct inode		*inode = file_inode(vmf->vma->vm_file);
vmf              1152 fs/xfs/xfs_file.c 		file_update_time(vmf->vma->vm_file);
vmf              1159 fs/xfs/xfs_file.c 		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
vmf              1161 fs/xfs/xfs_file.c 			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
vmf              1164 fs/xfs/xfs_file.c 			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
vmf              1166 fs/xfs/xfs_file.c 			ret = filemap_fault(vmf);
vmf              1177 fs/xfs/xfs_file.c 	struct vm_fault		*vmf)
vmf              1180 fs/xfs/xfs_file.c 	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
vmf              1181 fs/xfs/xfs_file.c 			IS_DAX(file_inode(vmf->vma->vm_file)) &&
vmf              1182 fs/xfs/xfs_file.c 			(vmf->flags & FAULT_FLAG_WRITE));
vmf              1187 fs/xfs/xfs_file.c 	struct vm_fault		*vmf,
vmf              1190 fs/xfs/xfs_file.c 	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
vmf              1194 fs/xfs/xfs_file.c 	return __xfs_filemap_fault(vmf, pe_size,
vmf              1195 fs/xfs/xfs_file.c 			(vmf->flags & FAULT_FLAG_WRITE));
vmf              1200 fs/xfs/xfs_file.c 	struct vm_fault		*vmf)
vmf              1202 fs/xfs/xfs_file.c 	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
vmf              1212 fs/xfs/xfs_file.c 	struct vm_fault		*vmf)
vmf              1215 fs/xfs/xfs_file.c 	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
vmf               244 include/linux/buffer_head.h int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
vmf               216 include/linux/dax.h vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
vmf               218 include/linux/dax.h vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
vmf                10 include/linux/huge_mm.h extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
vmf                14 include/linux/huge_mm.h extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
vmf                20 include/linux/huge_mm.h extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
vmf                22 include/linux/huge_mm.h static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
vmf                27 include/linux/huge_mm.h extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
vmf                50 include/linux/huge_mm.h vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vmf                51 include/linux/huge_mm.h vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
vmf               246 include/linux/huge_mm.h extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
vmf               371 include/linux/huge_mm.h static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
vmf               175 include/linux/iomap.h vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
vmf               804 include/linux/kvm_host.h vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
vmf                89 include/linux/memremap.h 	vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
vmf               472 include/linux/mm.h 	vm_fault_t (*fault)(struct vm_fault *vmf);
vmf               473 include/linux/mm.h 	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
vmf               475 include/linux/mm.h 	void (*map_pages)(struct vm_fault *vmf,
vmf               481 include/linux/mm.h 	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
vmf               484 include/linux/mm.h 	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
vmf               849 include/linux/mm.h vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
vmf               851 include/linux/mm.h vm_fault_t finish_fault(struct vm_fault *vmf);
vmf               852 include/linux/mm.h vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
vmf              2421 include/linux/mm.h extern vm_fault_t filemap_fault(struct vm_fault *vmf);
vmf              2422 include/linux/mm.h extern void filemap_map_pages(struct vm_fault *vmf,
vmf              2424 include/linux/mm.h extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
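The include/linux/mm.h entries above define the hooks that everything in this listing implements. A sketch of how the hypothetical handlers from the earlier sketches would be wired into a character device's mmap path:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

static const struct vm_operations_struct my_vm_ops = {
	.fault		= my_page_fault,	/* sketched earlier */
	.page_mkwrite	= my_page_mkwrite,	/* sketched earlier */
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &my_vm_ops;
	vma->vm_private_data = file->private_data;
	return 0;
}

static const struct file_operations my_fops = {
	.owner	= THIS_MODULE,
	.mmap	= my_mmap,
};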
vmf               734 include/linux/mm_types.h 				struct vm_fault *vmf);
vmf               427 include/linux/swap.h 				struct vm_fault *vmf);
vmf               429 include/linux/swap.h 				struct vm_fault *vmf);
vmf               539 include/linux/swap.h 				gfp_t gfp_mask, struct vm_fault *vmf)
vmf               545 include/linux/swap.h 			struct vm_fault *vmf)
vmf                33 include/linux/userfaultfd_k.h extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
vmf                82 include/linux/userfaultfd_k.h static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
vmf                11 include/trace/events/fs_dax.h 	TP_PROTO(struct inode *inode, struct vm_fault *vmf,
vmf                13 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, max_pgoff, result),
vmf                29 include/trace/events/fs_dax.h 		__entry->vm_start = vmf->vma->vm_start;
vmf                30 include/trace/events/fs_dax.h 		__entry->vm_end = vmf->vma->vm_end;
vmf                31 include/trace/events/fs_dax.h 		__entry->vm_flags = vmf->vma->vm_flags;
vmf                32 include/trace/events/fs_dax.h 		__entry->address = vmf->address;
vmf                33 include/trace/events/fs_dax.h 		__entry->flags = vmf->flags;
vmf                34 include/trace/events/fs_dax.h 		__entry->pgoff = vmf->pgoff;
vmf                56 include/trace/events/fs_dax.h 	TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
vmf                58 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, max_pgoff, result))
vmf                64 include/trace/events/fs_dax.h 	TP_PROTO(struct inode *inode, struct vm_fault *vmf,
vmf                67 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, zero_page, radix_entry),
vmf                79 include/trace/events/fs_dax.h 		__entry->vm_flags = vmf->vma->vm_flags;
vmf                80 include/trace/events/fs_dax.h 		__entry->address = vmf->address;
vmf                98 include/trace/events/fs_dax.h 	TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
vmf               100 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, zero_page, radix_entry))
vmf               106 include/trace/events/fs_dax.h 	TP_PROTO(struct inode *inode, struct vm_fault *vmf,
vmf               108 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, length, pfn, radix_entry),
vmf               122 include/trace/events/fs_dax.h 		__entry->vm_flags = vmf->vma->vm_flags;
vmf               123 include/trace/events/fs_dax.h 		__entry->address = vmf->address;
vmf               124 include/trace/events/fs_dax.h 		__entry->write = vmf->flags & FAULT_FLAG_WRITE;
vmf               147 include/trace/events/fs_dax.h 	TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
vmf               149 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, length, pfn, radix_entry))
vmf               154 include/trace/events/fs_dax.h 	TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result),
vmf               155 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, result),
vmf               168 include/trace/events/fs_dax.h 		__entry->vm_flags = vmf->vma->vm_flags;
vmf               169 include/trace/events/fs_dax.h 		__entry->address = vmf->address;
vmf               170 include/trace/events/fs_dax.h 		__entry->flags = vmf->flags;
vmf               171 include/trace/events/fs_dax.h 		__entry->pgoff = vmf->pgoff;
vmf               188 include/trace/events/fs_dax.h 	TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result), \
vmf               189 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, result))
vmf               198 include/trace/events/fs_dax.h 	TP_PROTO(struct inode *inode, struct vm_fault *vmf, void *radix_entry),
vmf               199 include/trace/events/fs_dax.h 	TP_ARGS(inode, vmf, radix_entry),
vmf               211 include/trace/events/fs_dax.h 		__entry->vm_flags = vmf->vma->vm_flags;
vmf               212 include/trace/events/fs_dax.h 		__entry->address = vmf->address;
vmf               213 include/trace/events/fs_dax.h 		__entry->write = vmf->flags & FAULT_FLAG_WRITE;
vmf               429 ipc/shm.c      static vm_fault_t shm_fault(struct vm_fault *vmf)
vmf               431 ipc/shm.c      	struct file *file = vmf->vma->vm_file;
vmf               434 ipc/shm.c      	return sfd->vm_ops->fault(vmf);
vmf              5431 kernel/events/core.c static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
vmf              5433 kernel/events/core.c 	struct perf_event *event = vmf->vma->vm_file->private_data;
vmf              5437 kernel/events/core.c 	if (vmf->flags & FAULT_FLAG_MKWRITE) {
vmf              5438 kernel/events/core.c 		if (vmf->pgoff == 0)
vmf              5448 kernel/events/core.c 	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
vmf              5451 kernel/events/core.c 	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
vmf              5452 kernel/events/core.c 	if (!vmf->page)
vmf              5455 kernel/events/core.c 	get_page(vmf->page);
vmf              5456 kernel/events/core.c 	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
vmf              5457 kernel/events/core.c 	vmf->page->index   = vmf->pgoff;
vmf                42 kernel/relay.c static vm_fault_t relay_buf_fault(struct vm_fault *vmf)
vmf                45 kernel/relay.c 	struct rchan_buf *buf = vmf->vma->vm_private_data;
vmf                46 kernel/relay.c 	pgoff_t pgoff = vmf->pgoff;
vmf                55 kernel/relay.c 	vmf->page = page;
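
The ipc/shm.c, kernel/events/core.c, and kernel/relay.c entries above all implement the same page-returning ->fault contract: translate vmf->pgoff to a struct page, take a reference, store it in vmf->page, and return 0 (or VM_FAULT_SIGBUS for an out-of-range offset). A minimal sketch of that pattern, assuming a hypothetical driver whose backing pages are preallocated (struct my_dev, my_pages, my_npages, and my_fault are illustrative names, not from the sources listed here):

#include <linux/mm.h>

struct my_dev {				/* hypothetical driver state */
	struct page **my_pages;		/* preallocated backing pages */
	unsigned long my_npages;
};

static vm_fault_t my_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;

	if (vmf->pgoff >= dev->my_npages)
		return VM_FAULT_SIGBUS;		/* offset past the buffer */

	vmf->page = dev->my_pages[vmf->pgoff];	/* resolve pgoff to a page */
	get_page(vmf->page);			/* hand a reference to the fault core */
	return 0;				/* core installs vmf->page in the PTE */
}
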
vmf              2343 mm/filemap.c   static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
vmf              2354 mm/filemap.c   	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
vmf              2357 mm/filemap.c   	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
vmf              2358 mm/filemap.c   	if (vmf->flags & FAULT_FLAG_KILLABLE) {
vmf              2367 mm/filemap.c   				up_read(&vmf->vma->vm_mm->mmap_sem);
vmf              2383 mm/filemap.c   static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
vmf              2385 mm/filemap.c   	struct file *file = vmf->vma->vm_file;
vmf              2389 mm/filemap.c   	pgoff_t offset = vmf->pgoff;
vmf              2392 mm/filemap.c   	if (vmf->vma->vm_flags & VM_RAND_READ)
vmf              2397 mm/filemap.c   	if (vmf->vma->vm_flags & VM_SEQ_READ) {
vmf              2398 mm/filemap.c   		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
vmf              2418 mm/filemap.c   	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
vmf              2431 mm/filemap.c   static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
vmf              2434 mm/filemap.c   	struct file *file = vmf->vma->vm_file;
vmf              2438 mm/filemap.c   	pgoff_t offset = vmf->pgoff;
vmf              2441 mm/filemap.c   	if (vmf->vma->vm_flags & VM_RAND_READ)
vmf              2446 mm/filemap.c   		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
vmf              2476 mm/filemap.c   vm_fault_t filemap_fault(struct vm_fault *vmf)
vmf              2479 mm/filemap.c   	struct file *file = vmf->vma->vm_file;
vmf              2484 mm/filemap.c   	pgoff_t offset = vmf->pgoff;
vmf              2497 mm/filemap.c   	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
vmf              2502 mm/filemap.c   		fpin = do_async_mmap_readahead(vmf, page);
vmf              2506 mm/filemap.c   		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
vmf              2508 mm/filemap.c   		fpin = do_sync_mmap_readahead(vmf);
vmf              2512 mm/filemap.c   					  vmf->gfp_mask);
vmf              2520 mm/filemap.c   	if (!lock_page_maybe_drop_mmap(vmf, page, &fpin))
vmf              2559 mm/filemap.c   	vmf->page = page;
vmf              2570 mm/filemap.c   	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
vmf              2602 mm/filemap.c   void filemap_map_pages(struct vm_fault *vmf,
vmf              2605 mm/filemap.c   	struct file *file = vmf->vma->vm_file;
vmf              2650 mm/filemap.c   		vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
vmf              2651 mm/filemap.c   		if (vmf->pte)
vmf              2652 mm/filemap.c   			vmf->pte += xas.xa_index - last_pgoff;
vmf              2654 mm/filemap.c   		if (alloc_set_pte(vmf, NULL, page))
vmf              2664 mm/filemap.c   		if (pmd_trans_huge(*vmf->pmd))
vmf              2671 mm/filemap.c   vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
vmf              2673 mm/filemap.c   	struct page *page = vmf->page;
vmf              2674 mm/filemap.c   	struct inode *inode = file_inode(vmf->vma->vm_file);
vmf              2678 mm/filemap.c   	file_update_time(vmf->vma->vm_file);
vmf              2726 mm/filemap.c   vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
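
Most filesystems do not hand-roll these handlers; they point their vm_operations_struct at the page-cache helpers declared in include/linux/mm.h and defined in mm/filemap.c above. A sketch of that wiring in the style of generic_file_vm_ops (my_file_vm_ops and my_file_mmap are illustrative names):

#include <linux/fs.h>
#include <linux/mm.h>

/* Page-cache-backed mmap, in the style of generic_file_vm_ops. */
static const struct vm_operations_struct my_file_vm_ops = {
	.fault		= filemap_fault,	/* synchronous (major) faults */
	.map_pages	= filemap_map_pages,	/* batched minor faults around the address */
	.page_mkwrite	= filemap_page_mkwrite,	/* dirty accounting for shared writes */
};

static int my_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &my_file_vm_ops;
	return 0;
}
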
vmf               575 mm/huge_memory.c static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
vmf               578 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vmf               581 mm/huge_memory.c 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
vmf               598 mm/huge_memory.c 	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
vmf               606 mm/huge_memory.c 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vmf               607 mm/huge_memory.c 	if (unlikely(!pmd_none(*vmf->pmd))) {
vmf               620 mm/huge_memory.c 			spin_unlock(vmf->ptl);
vmf               624 mm/huge_memory.c 			ret2 = handle_userfault(vmf, VM_UFFD_MISSING);
vmf               634 mm/huge_memory.c 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
vmf               635 mm/huge_memory.c 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
vmf               638 mm/huge_memory.c 		spin_unlock(vmf->ptl);
vmf               645 mm/huge_memory.c 	spin_unlock(vmf->ptl);
vmf               707 mm/huge_memory.c vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
vmf               709 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vmf               712 mm/huge_memory.c 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
vmf               720 mm/huge_memory.c 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
vmf               736 mm/huge_memory.c 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vmf               739 mm/huge_memory.c 		if (pmd_none(*vmf->pmd)) {
vmf               742 mm/huge_memory.c 				spin_unlock(vmf->ptl);
vmf               744 mm/huge_memory.c 				spin_unlock(vmf->ptl);
vmf               745 mm/huge_memory.c 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
vmf               749 mm/huge_memory.c 						   haddr, vmf->pmd, zero_page);
vmf               750 mm/huge_memory.c 				spin_unlock(vmf->ptl);
vmf               754 mm/huge_memory.c 			spin_unlock(vmf->ptl);
vmf               766 mm/huge_memory.c 	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
vmf               816 mm/huge_memory.c vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
vmf               818 mm/huge_memory.c 	unsigned long addr = vmf->address & PMD_MASK;
vmf               819 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vmf               845 mm/huge_memory.c 	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
vmf               894 mm/huge_memory.c vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
vmf               896 mm/huge_memory.c 	unsigned long addr = vmf->address & PUD_MASK;
vmf               897 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vmf               916 mm/huge_memory.c 	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
vmf              1155 mm/huge_memory.c void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
vmf              1159 mm/huge_memory.c 	bool write = vmf->flags & FAULT_FLAG_WRITE;
vmf              1161 mm/huge_memory.c 	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
vmf              1162 mm/huge_memory.c 	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
vmf              1168 mm/huge_memory.c 	haddr = vmf->address & HPAGE_PUD_MASK;
vmf              1169 mm/huge_memory.c 	if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
vmf              1170 mm/huge_memory.c 		update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
vmf              1173 mm/huge_memory.c 	spin_unlock(vmf->ptl);
vmf              1177 mm/huge_memory.c void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
vmf              1181 mm/huge_memory.c 	bool write = vmf->flags & FAULT_FLAG_WRITE;
vmf              1183 mm/huge_memory.c 	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
vmf              1184 mm/huge_memory.c 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
vmf              1190 mm/huge_memory.c 	haddr = vmf->address & HPAGE_PMD_MASK;
vmf              1191 mm/huge_memory.c 	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
vmf              1192 mm/huge_memory.c 		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
vmf              1195 mm/huge_memory.c 	spin_unlock(vmf->ptl);
vmf              1198 mm/huge_memory.c static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
vmf              1201 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vmf              1202 mm/huge_memory.c 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
vmf              1220 mm/huge_memory.c 					       vmf->address, page_to_nid(page));
vmf              1251 mm/huge_memory.c 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vmf              1252 mm/huge_memory.c 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
vmf              1264 mm/huge_memory.c 	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
vmf              1266 mm/huge_memory.c 	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
vmf              1275 mm/huge_memory.c 		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
vmf              1278 mm/huge_memory.c 		vmf->pte = pte_offset_map(&_pmd, haddr);
vmf              1279 mm/huge_memory.c 		VM_BUG_ON(!pte_none(*vmf->pte));
vmf              1280 mm/huge_memory.c 		set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
vmf              1281 mm/huge_memory.c 		pte_unmap(vmf->pte);
vmf              1286 mm/huge_memory.c 	pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
vmf              1288 mm/huge_memory.c 	spin_unlock(vmf->ptl);
vmf              1303 mm/huge_memory.c 	spin_unlock(vmf->ptl);
vmf              1315 mm/huge_memory.c vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
vmf              1317 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vmf              1320 mm/huge_memory.c 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
vmf              1325 mm/huge_memory.c 	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
vmf              1329 mm/huge_memory.c 	spin_lock(vmf->ptl);
vmf              1330 mm/huge_memory.c 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
vmf              1341 mm/huge_memory.c 		spin_unlock(vmf->ptl);
vmf              1343 mm/huge_memory.c 		spin_lock(vmf->ptl);
vmf              1344 mm/huge_memory.c 		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
vmf              1355 mm/huge_memory.c 		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry,  1))
vmf              1356 mm/huge_memory.c 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
vmf              1363 mm/huge_memory.c 	spin_unlock(vmf->ptl);
vmf              1376 mm/huge_memory.c 			split_huge_pmd(vma, vmf->pmd, vmf->address);
vmf              1379 mm/huge_memory.c 			ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
vmf              1381 mm/huge_memory.c 				split_huge_pmd(vma, vmf->pmd, vmf->address);
vmf              1393 mm/huge_memory.c 		split_huge_pmd(vma, vmf->pmd, vmf->address);
vmf              1405 mm/huge_memory.c 		clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);
vmf              1407 mm/huge_memory.c 		copy_user_huge_page(new_page, page, vmf->address,
vmf              1415 mm/huge_memory.c 	spin_lock(vmf->ptl);
vmf              1418 mm/huge_memory.c 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
vmf              1419 mm/huge_memory.c 		spin_unlock(vmf->ptl);
vmf              1427 mm/huge_memory.c 		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
vmf              1431 mm/huge_memory.c 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
vmf              1432 mm/huge_memory.c 		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
vmf              1442 mm/huge_memory.c 	spin_unlock(vmf->ptl);
vmf              1452 mm/huge_memory.c 	spin_unlock(vmf->ptl);
vmf              1534 mm/huge_memory.c vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
vmf              1536 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vmf              1539 mm/huge_memory.c 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
vmf              1547 mm/huge_memory.c 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vmf              1548 mm/huge_memory.c 	if (unlikely(!pmd_same(pmd, *vmf->pmd)))
vmf              1556 mm/huge_memory.c 	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
vmf              1557 mm/huge_memory.c 		page = pmd_page(*vmf->pmd);
vmf              1560 mm/huge_memory.c 		spin_unlock(vmf->ptl);
vmf              1596 mm/huge_memory.c 		spin_unlock(vmf->ptl);
vmf              1606 mm/huge_memory.c 	spin_unlock(vmf->ptl);
vmf              1610 mm/huge_memory.c 	spin_lock(vmf->ptl);
vmf              1611 mm/huge_memory.c 	if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
vmf              1655 mm/huge_memory.c 	spin_unlock(vmf->ptl);
vmf              1658 mm/huge_memory.c 				vmf->pmd, pmd, vmf->address, page, target_nid);
vmf              1673 mm/huge_memory.c 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
vmf              1674 mm/huge_memory.c 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
vmf              1677 mm/huge_memory.c 	spin_unlock(vmf->ptl);
vmf              3343 mm/hugetlb.c   static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
vmf              3949 mm/hugetlb.c   			struct vm_fault vmf = {
vmf              3969 mm/hugetlb.c   			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
vmf                37 mm/internal.h  vm_fault_t do_swap_page(struct vm_fault *vmf);
vmf               365 mm/internal.h  static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
vmf               368 mm/internal.h  	int flags = vmf->flags;
vmf               380 mm/internal.h  		fpin = get_file(vmf->vma->vm_file);
vmf               381 mm/internal.h  		up_read(&vmf->vma->vm_mm->mmap_sem);
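
maybe_unlock_mmap_for_io() above is the helper behind the retry protocol used by filemap_fault() and shmem_fault(): when the fault may block on I/O and FAULT_FLAG_ALLOW_RETRY is set (but FAULT_FLAG_RETRY_NOWAIT is not), the handler pins its file, drops mmap_sem, performs the I/O, and returns VM_FAULT_RETRY so the arch fault path re-takes the lock and refaults. A condensed sketch of a handler using that protocol (my_slow_fault is a hypothetical name):

static vm_fault_t my_slow_fault(struct vm_fault *vmf)
{
	struct file *fpin = NULL;

	if ((vmf->flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
	    FAULT_FLAG_ALLOW_RETRY) {
		fpin = get_file(vmf->vma->vm_file);	/* keep the file alive */
		up_read(&vmf->vma->vm_mm->mmap_sem);	/* let the mm make progress */
	}

	/* ... start the I/O and wait for it here ... */

	if (fpin) {
		fput(fpin);
		return VM_FAULT_RETRY;	/* caller re-takes mmap_sem and refaults */
	}
	return 0;
}
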
vmf               897 mm/khugepaged.c 	struct vm_fault vmf = {
vmf               910 mm/khugepaged.c 	vmf.pte = pte_offset_map(pmd, address);
vmf               911 mm/khugepaged.c 	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
vmf               912 mm/khugepaged.c 			vmf.pte++, vmf.address += PAGE_SIZE) {
vmf               913 mm/khugepaged.c 		vmf.orig_pte = *vmf.pte;
vmf               914 mm/khugepaged.c 		if (!is_swap_pte(vmf.orig_pte))
vmf               917 mm/khugepaged.c 		ret = do_swap_page(&vmf);
vmf               922 mm/khugepaged.c 			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
vmf               938 mm/khugepaged.c 		vmf.pte = pte_offset_map(pmd, vmf.address);
vmf               940 mm/khugepaged.c 	vmf.pte--;
vmf               941 mm/khugepaged.c 	pte_unmap(vmf.pte);
vmf              2196 mm/memory.c    static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
vmf              2199 mm/memory.c    	struct page *page = vmf->page;
vmf              2200 mm/memory.c    	unsigned int old_flags = vmf->flags;
vmf              2202 mm/memory.c    	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
vmf              2204 mm/memory.c    	if (vmf->vma->vm_file &&
vmf              2205 mm/memory.c    	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
vmf              2208 mm/memory.c    	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
vmf              2210 mm/memory.c    	vmf->flags = old_flags;
vmf              2230 mm/memory.c    static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
vmf              2232 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              2234 mm/memory.c    	struct page *page = vmf->page;
vmf              2264 mm/memory.c    		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
vmf              2283 mm/memory.c    static inline void wp_page_reuse(struct vm_fault *vmf)
vmf              2284 mm/memory.c    	__releases(vmf->ptl)
vmf              2286 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              2287 mm/memory.c    	struct page *page = vmf->page;
vmf              2297 mm/memory.c    	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
vmf              2298 mm/memory.c    	entry = pte_mkyoung(vmf->orig_pte);
vmf              2300 mm/memory.c    	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
vmf              2301 mm/memory.c    		update_mmu_cache(vma, vmf->address, vmf->pte);
vmf              2302 mm/memory.c    	pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              2321 mm/memory.c    static vm_fault_t wp_page_copy(struct vm_fault *vmf)
vmf              2323 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              2325 mm/memory.c    	struct page *old_page = vmf->page;
vmf              2335 mm/memory.c    	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
vmf              2337 mm/memory.c    							      vmf->address);
vmf              2342 mm/memory.c    				vmf->address);
vmf              2345 mm/memory.c    		cow_user_page(new_page, old_page, vmf->address, vma);
vmf              2354 mm/memory.c    				vmf->address & PAGE_MASK,
vmf              2355 mm/memory.c    				(vmf->address & PAGE_MASK) + PAGE_SIZE);
vmf              2361 mm/memory.c    	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
vmf              2362 mm/memory.c    	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
vmf              2372 mm/memory.c    		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
vmf              2381 mm/memory.c    		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
vmf              2382 mm/memory.c    		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
vmf              2390 mm/memory.c    		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
vmf              2391 mm/memory.c    		update_mmu_cache(vma, vmf->address, vmf->pte);
vmf              2428 mm/memory.c    	pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              2472 mm/memory.c    vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
vmf              2474 mm/memory.c    	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
vmf              2475 mm/memory.c    	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
vmf              2476 mm/memory.c    				       &vmf->ptl);
vmf              2481 mm/memory.c    	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
vmf              2482 mm/memory.c    		pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              2485 mm/memory.c    	wp_page_reuse(vmf);
vmf              2493 mm/memory.c    static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
vmf              2495 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              2500 mm/memory.c    		pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              2501 mm/memory.c    		vmf->flags |= FAULT_FLAG_MKWRITE;
vmf              2502 mm/memory.c    		ret = vma->vm_ops->pfn_mkwrite(vmf);
vmf              2505 mm/memory.c    		return finish_mkwrite_fault(vmf);
vmf              2507 mm/memory.c    	wp_page_reuse(vmf);
vmf              2511 mm/memory.c    static vm_fault_t wp_page_shared(struct vm_fault *vmf)
vmf              2512 mm/memory.c    	__releases(vmf->ptl)
vmf              2514 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              2517 mm/memory.c    	get_page(vmf->page);
vmf              2522 mm/memory.c    		pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              2523 mm/memory.c    		tmp = do_page_mkwrite(vmf);
vmf              2526 mm/memory.c    			put_page(vmf->page);
vmf              2529 mm/memory.c    		tmp = finish_mkwrite_fault(vmf);
vmf              2531 mm/memory.c    			unlock_page(vmf->page);
vmf              2532 mm/memory.c    			put_page(vmf->page);
vmf              2536 mm/memory.c    		wp_page_reuse(vmf);
vmf              2537 mm/memory.c    		lock_page(vmf->page);
vmf              2539 mm/memory.c    	ret |= fault_dirty_shared_page(vmf);
vmf              2540 mm/memory.c    	put_page(vmf->page);
vmf              2563 mm/memory.c    static vm_fault_t do_wp_page(struct vm_fault *vmf)
vmf              2564 mm/memory.c    	__releases(vmf->ptl)
vmf              2566 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              2568 mm/memory.c    	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
vmf              2569 mm/memory.c    	if (!vmf->page) {
vmf              2579 mm/memory.c    			return wp_pfn_shared(vmf);
vmf              2581 mm/memory.c    		pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              2582 mm/memory.c    		return wp_page_copy(vmf);
vmf              2589 mm/memory.c    	if (PageAnon(vmf->page)) {
vmf              2591 mm/memory.c    		if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
vmf              2592 mm/memory.c    					   page_count(vmf->page) != 1))
vmf              2594 mm/memory.c    		if (!trylock_page(vmf->page)) {
vmf              2595 mm/memory.c    			get_page(vmf->page);
vmf              2596 mm/memory.c    			pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              2597 mm/memory.c    			lock_page(vmf->page);
vmf              2598 mm/memory.c    			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf              2599 mm/memory.c    					vmf->address, &vmf->ptl);
vmf              2600 mm/memory.c    			if (!pte_same(*vmf->pte, vmf->orig_pte)) {
vmf              2601 mm/memory.c    				unlock_page(vmf->page);
vmf              2602 mm/memory.c    				pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              2603 mm/memory.c    				put_page(vmf->page);
vmf              2606 mm/memory.c    			put_page(vmf->page);
vmf              2608 mm/memory.c    		if (PageKsm(vmf->page)) {
vmf              2609 mm/memory.c    			bool reused = reuse_ksm_page(vmf->page, vmf->vma,
vmf              2610 mm/memory.c    						     vmf->address);
vmf              2611 mm/memory.c    			unlock_page(vmf->page);
vmf              2614 mm/memory.c    			wp_page_reuse(vmf);
vmf              2617 mm/memory.c    		if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
vmf              2626 mm/memory.c    				page_move_anon_rmap(vmf->page, vma);
vmf              2628 mm/memory.c    			unlock_page(vmf->page);
vmf              2629 mm/memory.c    			wp_page_reuse(vmf);
vmf              2632 mm/memory.c    		unlock_page(vmf->page);
vmf              2635 mm/memory.c    		return wp_page_shared(vmf);
vmf              2641 mm/memory.c    	get_page(vmf->page);
vmf              2643 mm/memory.c    	pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              2644 mm/memory.c    	return wp_page_copy(vmf);
vmf              2751 mm/memory.c    vm_fault_t do_swap_page(struct vm_fault *vmf)
vmf              2753 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              2762 mm/memory.c    	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
vmf              2765 mm/memory.c    	entry = pte_to_swp_entry(vmf->orig_pte);
vmf              2768 mm/memory.c    			migration_entry_wait(vma->vm_mm, vmf->pmd,
vmf              2769 mm/memory.c    					     vmf->address);
vmf              2771 mm/memory.c    			vmf->page = device_private_entry_to_page(entry);
vmf              2772 mm/memory.c    			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
vmf              2776 mm/memory.c    			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
vmf              2784 mm/memory.c    	page = lookup_swap_cache(entry, vma, vmf->address);
vmf              2794 mm/memory.c    							vmf->address);
vmf              2804 mm/memory.c    						vmf);
vmf              2813 mm/memory.c    			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf              2814 mm/memory.c    					vmf->address, &vmf->ptl);
vmf              2815 mm/memory.c    			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
vmf              2835 mm/memory.c    	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
vmf              2853 mm/memory.c    	page = ksm_might_need_to_copy(page, vma, vmf->address);
vmf              2869 mm/memory.c    	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
vmf              2870 mm/memory.c    			&vmf->ptl);
vmf              2871 mm/memory.c    	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
vmf              2892 mm/memory.c    	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
vmf              2894 mm/memory.c    		vmf->flags &= ~FAULT_FLAG_WRITE;
vmf              2899 mm/memory.c    	if (pte_swp_soft_dirty(vmf->orig_pte))
vmf              2901 mm/memory.c    	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
vmf              2902 mm/memory.c    	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
vmf              2903 mm/memory.c    	vmf->orig_pte = pte;
vmf              2907 mm/memory.c    		page_add_new_anon_rmap(page, vma, vmf->address, false);
vmf              2911 mm/memory.c    		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
vmf              2934 mm/memory.c    	if (vmf->flags & FAULT_FLAG_WRITE) {
vmf              2935 mm/memory.c    		ret |= do_wp_page(vmf);
vmf              2942 mm/memory.c    	update_mmu_cache(vma, vmf->address, vmf->pte);
vmf              2944 mm/memory.c    	pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              2949 mm/memory.c    	pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              2966 mm/memory.c    static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
vmf              2968 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              2988 mm/memory.c    	if (pte_alloc(vma->vm_mm, vmf->pmd))
vmf              2992 mm/memory.c    	if (unlikely(pmd_trans_unstable(vmf->pmd)))
vmf              2996 mm/memory.c    	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
vmf              2998 mm/memory.c    		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
vmf              3000 mm/memory.c    		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf              3001 mm/memory.c    				vmf->address, &vmf->ptl);
vmf              3002 mm/memory.c    		if (!pte_none(*vmf->pte))
vmf              3009 mm/memory.c    			pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              3010 mm/memory.c    			return handle_userfault(vmf, VM_UFFD_MISSING);
vmf              3018 mm/memory.c    	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
vmf              3037 mm/memory.c    	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
vmf              3038 mm/memory.c    			&vmf->ptl);
vmf              3039 mm/memory.c    	if (!pte_none(*vmf->pte))
vmf              3048 mm/memory.c    		pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              3051 mm/memory.c    		return handle_userfault(vmf, VM_UFFD_MISSING);
vmf              3055 mm/memory.c    	page_add_new_anon_rmap(page, vma, vmf->address, false);
vmf              3059 mm/memory.c    	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
vmf              3062 mm/memory.c    	update_mmu_cache(vma, vmf->address, vmf->pte);
vmf              3064 mm/memory.c    	pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              3081 mm/memory.c    static vm_fault_t __do_fault(struct vm_fault *vmf)
vmf              3083 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              3101 mm/memory.c    	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
vmf              3102 mm/memory.c    		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
vmf              3103 mm/memory.c    		if (!vmf->prealloc_pte)
vmf              3108 mm/memory.c    	ret = vma->vm_ops->fault(vmf);
vmf              3113 mm/memory.c    	if (unlikely(PageHWPoison(vmf->page))) {
vmf              3115 mm/memory.c    			unlock_page(vmf->page);
vmf              3116 mm/memory.c    		put_page(vmf->page);
vmf              3117 mm/memory.c    		vmf->page = NULL;
vmf              3122 mm/memory.c    		lock_page(vmf->page);
vmf              3124 mm/memory.c    		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
vmf              3140 mm/memory.c    static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
vmf              3142 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              3144 mm/memory.c    	if (!pmd_none(*vmf->pmd))
vmf              3146 mm/memory.c    	if (vmf->prealloc_pte) {
vmf              3147 mm/memory.c    		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vmf              3148 mm/memory.c    		if (unlikely(!pmd_none(*vmf->pmd))) {
vmf              3149 mm/memory.c    			spin_unlock(vmf->ptl);
vmf              3154 mm/memory.c    		pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
vmf              3155 mm/memory.c    		spin_unlock(vmf->ptl);
vmf              3156 mm/memory.c    		vmf->prealloc_pte = NULL;
vmf              3157 mm/memory.c    	} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
vmf              3172 mm/memory.c    	if (pmd_devmap_trans_unstable(vmf->pmd))
vmf              3184 mm/memory.c    	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
vmf              3185 mm/memory.c    			&vmf->ptl);
vmf              3190 mm/memory.c    static void deposit_prealloc_pte(struct vm_fault *vmf)
vmf              3192 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              3194 mm/memory.c    	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
vmf              3200 mm/memory.c    	vmf->prealloc_pte = NULL;
vmf              3203 mm/memory.c    static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
vmf              3205 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              3206 mm/memory.c    	bool write = vmf->flags & FAULT_FLAG_WRITE;
vmf              3207 mm/memory.c    	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
vmf              3222 mm/memory.c    	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
vmf              3223 mm/memory.c    		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
vmf              3224 mm/memory.c    		if (!vmf->prealloc_pte)
vmf              3229 mm/memory.c    	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vmf              3230 mm/memory.c    	if (unlikely(!pmd_none(*vmf->pmd)))
vmf              3246 mm/memory.c    		deposit_prealloc_pte(vmf);
vmf              3248 mm/memory.c    	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
vmf              3250 mm/memory.c    	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
vmf              3256 mm/memory.c    	spin_unlock(vmf->ptl);
vmf              3260 mm/memory.c    static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
vmf              3283 mm/memory.c    vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
vmf              3286 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              3287 mm/memory.c    	bool write = vmf->flags & FAULT_FLAG_WRITE;
vmf              3291 mm/memory.c    	if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
vmf              3296 mm/memory.c    		ret = do_set_pmd(vmf, page);
vmf              3301 mm/memory.c    	if (!vmf->pte) {
vmf              3302 mm/memory.c    		ret = pte_alloc_one_map(vmf);
vmf              3308 mm/memory.c    	if (unlikely(!pte_none(*vmf->pte)))
vmf              3318 mm/memory.c    		page_add_new_anon_rmap(page, vma, vmf->address, false);
vmf              3325 mm/memory.c    	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
vmf              3328 mm/memory.c    	update_mmu_cache(vma, vmf->address, vmf->pte);
vmf              3349 mm/memory.c    vm_fault_t finish_fault(struct vm_fault *vmf)
vmf              3355 mm/memory.c    	if ((vmf->flags & FAULT_FLAG_WRITE) &&
vmf              3356 mm/memory.c    	    !(vmf->vma->vm_flags & VM_SHARED))
vmf              3357 mm/memory.c    		page = vmf->cow_page;
vmf              3359 mm/memory.c    		page = vmf->page;
vmf              3365 mm/memory.c    	if (!(vmf->vma->vm_flags & VM_SHARED))
vmf              3366 mm/memory.c    		ret = check_stable_address_space(vmf->vma->vm_mm);
vmf              3368 mm/memory.c    		ret = alloc_set_pte(vmf, vmf->memcg, page);
vmf              3369 mm/memory.c    	if (vmf->pte)
vmf              3370 mm/memory.c    		pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              3434 mm/memory.c    static vm_fault_t do_fault_around(struct vm_fault *vmf)
vmf              3436 mm/memory.c    	unsigned long address = vmf->address, nr_pages, mask;
vmf              3437 mm/memory.c    	pgoff_t start_pgoff = vmf->pgoff;
vmf              3445 mm/memory.c    	vmf->address = max(address & mask, vmf->vma->vm_start);
vmf              3446 mm/memory.c    	off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
vmf              3454 mm/memory.c    		((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
vmf              3456 mm/memory.c    	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
vmf              3459 mm/memory.c    	if (pmd_none(*vmf->pmd)) {
vmf              3460 mm/memory.c    		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
vmf              3461 mm/memory.c    		if (!vmf->prealloc_pte)
vmf              3466 mm/memory.c    	vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
vmf              3469 mm/memory.c    	if (pmd_trans_huge(*vmf->pmd)) {
vmf              3475 mm/memory.c    	if (!vmf->pte)
vmf              3479 mm/memory.c    	vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
vmf              3480 mm/memory.c    	if (!pte_none(*vmf->pte))
vmf              3482 mm/memory.c    	pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              3484 mm/memory.c    	vmf->address = address;
vmf              3485 mm/memory.c    	vmf->pte = NULL;
vmf              3489 mm/memory.c    static vm_fault_t do_read_fault(struct vm_fault *vmf)
vmf              3491 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              3500 mm/memory.c    		ret = do_fault_around(vmf);
vmf              3505 mm/memory.c    	ret = __do_fault(vmf);
vmf              3509 mm/memory.c    	ret |= finish_fault(vmf);
vmf              3510 mm/memory.c    	unlock_page(vmf->page);
vmf              3512 mm/memory.c    		put_page(vmf->page);
vmf              3516 mm/memory.c    static vm_fault_t do_cow_fault(struct vm_fault *vmf)
vmf              3518 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              3524 mm/memory.c    	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
vmf              3525 mm/memory.c    	if (!vmf->cow_page)
vmf              3528 mm/memory.c    	if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
vmf              3529 mm/memory.c    				&vmf->memcg, false)) {
vmf              3530 mm/memory.c    		put_page(vmf->cow_page);
vmf              3534 mm/memory.c    	ret = __do_fault(vmf);
vmf              3540 mm/memory.c    	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
vmf              3541 mm/memory.c    	__SetPageUptodate(vmf->cow_page);
vmf              3543 mm/memory.c    	ret |= finish_fault(vmf);
vmf              3544 mm/memory.c    	unlock_page(vmf->page);
vmf              3545 mm/memory.c    	put_page(vmf->page);
vmf              3550 mm/memory.c    	mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
vmf              3551 mm/memory.c    	put_page(vmf->cow_page);
vmf              3555 mm/memory.c    static vm_fault_t do_shared_fault(struct vm_fault *vmf)
vmf              3557 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              3560 mm/memory.c    	ret = __do_fault(vmf);
vmf              3569 mm/memory.c    		unlock_page(vmf->page);
vmf              3570 mm/memory.c    		tmp = do_page_mkwrite(vmf);
vmf              3573 mm/memory.c    			put_page(vmf->page);
vmf              3578 mm/memory.c    	ret |= finish_fault(vmf);
vmf              3581 mm/memory.c    		unlock_page(vmf->page);
vmf              3582 mm/memory.c    		put_page(vmf->page);
vmf              3586 mm/memory.c    	ret |= fault_dirty_shared_page(vmf);
vmf              3598 mm/memory.c    static vm_fault_t do_fault(struct vm_fault *vmf)
vmf              3600 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              3612 mm/memory.c    		if (unlikely(!pmd_present(*vmf->pmd)))
vmf              3615 mm/memory.c    			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
vmf              3616 mm/memory.c    						       vmf->pmd,
vmf              3617 mm/memory.c    						       vmf->address,
vmf              3618 mm/memory.c    						       &vmf->ptl);
vmf              3626 mm/memory.c    			if (unlikely(pte_none(*vmf->pte)))
vmf              3631 mm/memory.c    			pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              3633 mm/memory.c    	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
vmf              3634 mm/memory.c    		ret = do_read_fault(vmf);
vmf              3636 mm/memory.c    		ret = do_cow_fault(vmf);
vmf              3638 mm/memory.c    		ret = do_shared_fault(vmf);
vmf              3641 mm/memory.c    	if (vmf->prealloc_pte) {
vmf              3642 mm/memory.c    		pte_free(vm_mm, vmf->prealloc_pte);
vmf              3643 mm/memory.c    		vmf->prealloc_pte = NULL;
vmf              3663 mm/memory.c    static vm_fault_t do_numa_page(struct vm_fault *vmf)
vmf              3665 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vmf              3672 mm/memory.c    	bool was_writable = pte_savedwrite(vmf->orig_pte);
vmf              3680 mm/memory.c    	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
vmf              3681 mm/memory.c    	spin_lock(vmf->ptl);
vmf              3682 mm/memory.c    	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
vmf              3683 mm/memory.c    		pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              3691 mm/memory.c    	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
vmf              3696 mm/memory.c    	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
vmf              3697 mm/memory.c    	update_mmu_cache(vma, vmf->address, vmf->pte);
vmf              3699 mm/memory.c    	page = vm_normal_page(vma, vmf->address, pte);
vmf              3701 mm/memory.c    		pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              3707 mm/memory.c    		pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              3731 mm/memory.c    	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
vmf              3733 mm/memory.c    	pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              3753 mm/memory.c    static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
vmf              3755 mm/memory.c    	if (vma_is_anonymous(vmf->vma))
vmf              3756 mm/memory.c    		return do_huge_pmd_anonymous_page(vmf);
vmf              3757 mm/memory.c    	if (vmf->vma->vm_ops->huge_fault)
vmf              3758 mm/memory.c    		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
vmf              3763 mm/memory.c    static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
vmf              3765 mm/memory.c    	if (vma_is_anonymous(vmf->vma))
vmf              3766 mm/memory.c    		return do_huge_pmd_wp_page(vmf, orig_pmd);
vmf              3767 mm/memory.c    	if (vmf->vma->vm_ops->huge_fault)
vmf              3768 mm/memory.c    		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
vmf              3771 mm/memory.c    	VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
vmf              3772 mm/memory.c    	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
vmf              3782 mm/memory.c    static vm_fault_t create_huge_pud(struct vm_fault *vmf)
vmf              3786 mm/memory.c    	if (vma_is_anonymous(vmf->vma))
vmf              3788 mm/memory.c    	if (vmf->vma->vm_ops->huge_fault)
vmf              3789 mm/memory.c    		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
vmf              3794 mm/memory.c    static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
vmf              3798 mm/memory.c    	if (vma_is_anonymous(vmf->vma))
vmf              3800 mm/memory.c    	if (vmf->vma->vm_ops->huge_fault)
vmf              3801 mm/memory.c    		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
vmf              3821 mm/memory.c    static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
vmf              3825 mm/memory.c    	if (unlikely(pmd_none(*vmf->pmd))) {
vmf              3832 mm/memory.c    		vmf->pte = NULL;
vmf              3835 mm/memory.c    		if (pmd_devmap_trans_unstable(vmf->pmd))
vmf              3843 mm/memory.c    		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
vmf              3844 mm/memory.c    		vmf->orig_pte = *vmf->pte;
vmf              3855 mm/memory.c    		if (pte_none(vmf->orig_pte)) {
vmf              3856 mm/memory.c    			pte_unmap(vmf->pte);
vmf              3857 mm/memory.c    			vmf->pte = NULL;
vmf              3861 mm/memory.c    	if (!vmf->pte) {
vmf              3862 mm/memory.c    		if (vma_is_anonymous(vmf->vma))
vmf              3863 mm/memory.c    			return do_anonymous_page(vmf);
vmf              3865 mm/memory.c    			return do_fault(vmf);
vmf              3868 mm/memory.c    	if (!pte_present(vmf->orig_pte))
vmf              3869 mm/memory.c    		return do_swap_page(vmf);
vmf              3871 mm/memory.c    	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
vmf              3872 mm/memory.c    		return do_numa_page(vmf);
vmf              3874 mm/memory.c    	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
vmf              3875 mm/memory.c    	spin_lock(vmf->ptl);
vmf              3876 mm/memory.c    	entry = vmf->orig_pte;
vmf              3877 mm/memory.c    	if (unlikely(!pte_same(*vmf->pte, entry)))
vmf              3879 mm/memory.c    	if (vmf->flags & FAULT_FLAG_WRITE) {
vmf              3881 mm/memory.c    			return do_wp_page(vmf);
vmf              3885 mm/memory.c    	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
vmf              3886 mm/memory.c    				vmf->flags & FAULT_FLAG_WRITE)) {
vmf              3887 mm/memory.c    		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
vmf              3895 mm/memory.c    		if (vmf->flags & FAULT_FLAG_WRITE)
vmf              3896 mm/memory.c    			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
vmf              3899 mm/memory.c    	pte_unmap_unlock(vmf->pte, vmf->ptl);
vmf              3912 mm/memory.c    	struct vm_fault vmf = {
vmf              3930 mm/memory.c    	vmf.pud = pud_alloc(mm, p4d, address);
vmf              3931 mm/memory.c    	if (!vmf.pud)
vmf              3933 mm/memory.c    	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
vmf              3934 mm/memory.c    		ret = create_huge_pud(&vmf);
vmf              3938 mm/memory.c    		pud_t orig_pud = *vmf.pud;
vmf              3946 mm/memory.c    				ret = wp_huge_pud(&vmf, orig_pud);
vmf              3950 mm/memory.c    				huge_pud_set_accessed(&vmf, orig_pud);
vmf              3956 mm/memory.c    	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
vmf              3957 mm/memory.c    	if (!vmf.pmd)
vmf              3959 mm/memory.c    	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
vmf              3960 mm/memory.c    		ret = create_huge_pmd(&vmf);
vmf              3964 mm/memory.c    		pmd_t orig_pmd = *vmf.pmd;
vmf              3971 mm/memory.c    				pmd_migration_entry_wait(mm, vmf.pmd);
vmf              3976 mm/memory.c    				return do_huge_pmd_numa_page(&vmf, orig_pmd);
vmf              3979 mm/memory.c    				ret = wp_huge_pmd(&vmf, orig_pmd);
vmf              3983 mm/memory.c    				huge_pmd_set_accessed(&vmf, orig_pmd);
vmf              3989 mm/memory.c    	return handle_pte_fault(&vmf);
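
The mm/memory.c entries above trace the core dispatch: __handle_mm_fault() fills an on-stack struct vm_fault, allocates and inspects the pud and pmd levels (diverting to the huge-page handlers where appropriate), and handle_pte_fault() then routes on the PTE state. A condensed restatement of that routing, as a sketch only (the real function also handles PTE-map/lock details, spurious-fault TLB flushes, and the pmd_devmap_trans_unstable() cases):

/* Sketch of handle_pte_fault() routing; not the real function. */
static vm_fault_t handle_pte_fault_sketch(struct vm_fault *vmf)
{
	if (!vmf->pte)					/* nothing mapped yet */
		return vma_is_anonymous(vmf->vma) ?
			do_anonymous_page(vmf) :	/* anon: zero page or new page */
			do_fault(vmf);			/* file-backed: read/cow/shared */

	if (!pte_present(vmf->orig_pte))		/* swap or migration entry */
		return do_swap_page(vmf);

	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
		return do_numa_page(vmf);		/* NUMA hinting fault */

	if ((vmf->flags & FAULT_FLAG_WRITE) && !pte_write(vmf->orig_pte))
		return do_wp_page(vmf);			/* write-protect: reuse or COW */

	return 0;	/* access/dirty bit update only */
}
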
vmf              3324 mm/mmap.c      static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
vmf              3363 mm/mmap.c      static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
vmf              3365 mm/mmap.c      	struct vm_area_struct *vma = vmf->vma;
vmf              3375 mm/mmap.c      			return sm->fault(sm, vmf->vma, vmf);
vmf              3380 mm/mmap.c      	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
vmf              3386 mm/mmap.c      		vmf->page = page;
vmf              1693 mm/nommu.c     vm_fault_t filemap_fault(struct vm_fault *vmf)
vmf              1700 mm/nommu.c     void filemap_map_pages(struct vm_fault *vmf,
vmf               149 mm/shmem.c     		struct vm_fault *vmf, vm_fault_t *fault_type);
vmf              1455 mm/shmem.c     	struct vm_fault vmf;
vmf              1458 mm/shmem.c     	vmf.vma = &pvma;
vmf              1459 mm/shmem.c     	vmf.address = 0;
vmf              1460 mm/shmem.c     	page = swap_cluster_readahead(swap, gfp, &vmf);
vmf              1742 mm/shmem.c     	struct vm_area_struct *vma, struct vm_fault *vmf,
vmf              1803 mm/shmem.c     		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
vmf              1990 mm/shmem.c     static vm_fault_t shmem_fault(struct vm_fault *vmf)
vmf              1992 mm/shmem.c     	struct vm_area_struct *vma = vmf->vma;
vmf              2023 mm/shmem.c     		    vmf->pgoff >= shmem_falloc->start &&
vmf              2024 mm/shmem.c     		    vmf->pgoff < shmem_falloc->next) {
vmf              2030 mm/shmem.c     			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
vmf              2066 mm/shmem.c     	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
vmf              2067 mm/shmem.c     				  gfp, vma, vmf, &ret);
vmf               540 mm/swap_state.c 				struct vm_fault *vmf)
vmf               550 mm/swap_state.c 	struct vm_area_struct *vma = vmf->vma;
vmf               551 mm/swap_state.c 	unsigned long addr = vmf->address;
vmf               640 mm/swap_state.c static void swap_ra_info(struct vm_fault *vmf,
vmf               643 mm/swap_state.c 	struct vm_area_struct *vma = vmf->vma;
vmf               661 mm/swap_state.c 	faddr = vmf->address;
vmf               662 mm/swap_state.c 	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
vmf               723 mm/swap_state.c 				       struct vm_fault *vmf)
vmf               726 mm/swap_state.c 	struct vm_area_struct *vma = vmf->vma;
vmf               734 mm/swap_state.c 	swap_ra_info(vmf, &ra_info);
vmf               750 mm/swap_state.c 					       vmf->address, &page_allocated);
vmf               765 mm/swap_state.c 	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
vmf               782 mm/swap_state.c 				struct vm_fault *vmf)
vmf               785 mm/swap_state.c 			swap_vma_readahead(entry, gfp_mask, vmf) :
vmf               786 mm/swap_state.c 			swap_cluster_readahead(entry, gfp_mask, vmf);
vmf              1925 mm/swapfile.c  		struct vm_fault vmf;
vmf              1940 mm/swapfile.c  		vmf.vma = vma;
vmf              1941 mm/swapfile.c  		vmf.address = addr;
vmf              1942 mm/swapfile.c  		vmf.pmd = pmd;
vmf              1943 mm/swapfile.c  		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf);
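
The mm/swapfile.c lines above show the minimal struct vm_fault that swapin_readahead() consumers assemble by hand: only .vma, .address, and (for the VMA-readahead path) .pmd are needed. Restated as a small helper, a sketch only (my_swapin is a hypothetical name):

#include <linux/swap.h>

static struct page *my_swapin(swp_entry_t entry, struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmd)
{
	struct vm_fault vmf = {
		.vma	 = vma,		/* VMA being swapped back in */
		.address = addr,	/* faulting user address */
		.pmd	 = pmd,		/* lets swap_ra_info() probe nearby PTEs */
	};

	return swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf);
}
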
vmf               748 samples/vfio-mdev/mbochs.c static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf)
vmf               750 samples/vfio-mdev/mbochs.c 	struct vm_area_struct *vma = vmf->vma;
vmf               752 samples/vfio-mdev/mbochs.c 	pgoff_t page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vmf               757 samples/vfio-mdev/mbochs.c 	vmf->page = mbochs_get_page(mdev_state, page_offset);
vmf               758 samples/vfio-mdev/mbochs.c 	if (!vmf->page)
vmf               786 samples/vfio-mdev/mbochs.c static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
vmf               788 samples/vfio-mdev/mbochs.c 	struct vm_area_struct *vma = vmf->vma;
vmf               791 samples/vfio-mdev/mbochs.c 	if (WARN_ON(vmf->pgoff >= dmabuf->pagecount))
vmf               794 samples/vfio-mdev/mbochs.c 	vmf->page = dmabuf->pages[vmf->pgoff];
vmf               795 samples/vfio-mdev/mbochs.c 	get_page(vmf->page);
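
mbochs_region_vm_fault() and mbochs_dmabuf_vm_fault() above are page-backed, but device memory without struct pages takes the complementary PFN-based path: mark the VMA VM_PFNMAP at mmap time and install the translation directly with vmf_insert_pfn(), returning its vm_fault_t. A sketch under those assumptions (struct my_mmio_region and my_mmio_fault are hypothetical):

#include <linux/mm.h>

struct my_mmio_region {			/* hypothetical device state */
	phys_addr_t base;		/* physical base of the mapped BAR */
	unsigned long nr_pages;
};

static vm_fault_t my_mmio_fault(struct vm_fault *vmf)
{
	struct my_mmio_region *r = vmf->vma->vm_private_data;

	if (vmf->pgoff >= r->nr_pages)
		return VM_FAULT_SIGBUS;

	/* No vmf->page for PFNMAP memory: install the PTE ourselves. */
	return vmf_insert_pfn(vmf->vma, vmf->address,
			      (r->base >> PAGE_SHIFT) + vmf->pgoff);
}
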
vmf               455 security/selinux/selinuxfs.c static vm_fault_t sel_mmap_policy_fault(struct vm_fault *vmf)
vmf               457 security/selinux/selinuxfs.c 	struct policy_load_memory *plm = vmf->vma->vm_file->private_data;
vmf               461 security/selinux/selinuxfs.c 	if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
vmf               464 security/selinux/selinuxfs.c 	offset = vmf->pgoff << PAGE_SHIFT;
vmf               471 security/selinux/selinuxfs.c 	vmf->page = page;
vmf              3239 sound/core/pcm_native.c static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
vmf              3241 sound/core/pcm_native.c 	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
vmf              3247 sound/core/pcm_native.c 	vmf->page = virt_to_page(runtime->status);
vmf              3248 sound/core/pcm_native.c 	get_page(vmf->page);
vmf              3275 sound/core/pcm_native.c static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
vmf              3277 sound/core/pcm_native.c 	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
vmf              3283 sound/core/pcm_native.c 	vmf->page = virt_to_page(runtime->control);
vmf              3284 sound/core/pcm_native.c 	get_page(vmf->page);
vmf              3364 sound/core/pcm_native.c static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
vmf              3366 sound/core/pcm_native.c 	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
vmf              3375 sound/core/pcm_native.c 	offset = vmf->pgoff << PAGE_SHIFT;
vmf              3386 sound/core/pcm_native.c 	vmf->page = page;
vmf               129 sound/usb/usx2y/us122l.c static vm_fault_t usb_stream_hwdep_vm_fault(struct vm_fault *vmf)
vmf               134 sound/usb/usx2y/us122l.c 	struct us122l *us122l = vmf->vma->vm_private_data;
vmf               142 sound/usb/usx2y/us122l.c 	offset = vmf->pgoff << PAGE_SHIFT;
vmf               157 sound/usb/usx2y/us122l.c 	vmf->page = page;
vmf                21 sound/usb/usx2y/usX2Yhwdep.c static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf)
vmf                28 sound/usb/usx2y/usX2Yhwdep.c 		   vmf->vma->vm_start,
vmf                29 sound/usb/usx2y/usX2Yhwdep.c 		   vmf->pgoff);
vmf                31 sound/usb/usx2y/usX2Yhwdep.c 	offset = vmf->pgoff << PAGE_SHIFT;
vmf                32 sound/usb/usx2y/usX2Yhwdep.c 	vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;
vmf                35 sound/usb/usx2y/usX2Yhwdep.c 	vmf->page = page;
vmf               645 sound/usb/usx2y/usx2yhwdeppcm.c static vm_fault_t snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf)
vmf               650 sound/usb/usx2y/usx2yhwdeppcm.c 	offset = vmf->pgoff << PAGE_SHIFT;
vmf               651 sound/usb/usx2y/usx2yhwdeppcm.c 	vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->hwdep_pcm_shm + offset;
vmf               652 sound/usb/usx2y/usx2yhwdeppcm.c 	vmf->page = virt_to_page(vaddr);
vmf               653 sound/usb/usx2y/usx2yhwdeppcm.c 	get_page(vmf->page);
vmf               152 virt/kvm/arm/arm.c vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
vmf              2711 virt/kvm/kvm_main.c static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
vmf              2713 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
vmf              2716 virt/kvm/kvm_main.c 	if (vmf->pgoff == 0)
vmf              2719 virt/kvm/kvm_main.c 	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
vmf              2723 virt/kvm/kvm_main.c 	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
vmf              2727 virt/kvm/kvm_main.c 		return kvm_arch_vcpu_fault(vcpu, vmf);
vmf              2729 virt/kvm/kvm_main.c 	vmf->page = page;