Lines matching refs:vmf (fs/dax.c, the DAX page-fault path):

In dax_load_hole() (vmf is a formal argument):
  257   struct vm_fault *vmf)
  262   page = find_or_create_page(mapping, vmf->pgoff,
  268   if (vmf->pgoff >= size) {
  274   vmf->page = page;
In dax_insert_mapping() (vmf is a formal argument):
  293   struct vm_area_struct *vma, struct vm_fault *vmf)
  297   unsigned long vaddr = (unsigned long)vmf->virtual_address;
  313   if (unlikely(vmf->pgoff >= size)) {
In __dax_fault() (vmf is a formal argument):
  355   int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
  363   unsigned long vaddr = (unsigned long)vmf->virtual_address;
  371   if (vmf->pgoff >= size)
  375   block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
  379   page = find_get_page(mapping, vmf->pgoff);
  381   if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
  391   if (unlikely(vmf->pgoff >= size)) {
  407   if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
  408   if (vmf->flags & FAULT_FLAG_WRITE) {
  418   return dax_load_hole(mapping, page, vmf);
  422   if (vmf->cow_page) {
  423   struct page *new_page = vmf->cow_page;
  430   vmf->page = page;
  436   if (vmf->pgoff >= size) {
  447   page = find_lock_page(mapping, vmf->pgoff);
  450   unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
  467   error = dax_insert_mapping(inode, &bh, vma, vmf);
  472   WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
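The block number that __dax_fault() computes from vmf->pgoff at line 375 is resolved by the filesystem through the get_block_t callback it receives at line 355, and the buffer_mapped()/buffer_unwritten() tests at line 407 inspect the buffer_head that callback fills in. As a minimal sketch of such a callback, assume a hypothetical filesystem "myfs" whose file blocks map one-to-one onto device blocks; myfs_get_block and that layout are illustrative and not part of the listing above:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/*
 * Hypothetical get_block_t callback: map file block iblock to a device
 * block.  __dax_fault() calls it with create != 0 on write faults so the
 * filesystem may allocate; with this toy linear layout nothing needs
 * allocating, so the create flag can be ignored.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh, int create)
{
	map_bh(bh, inode->i_sb, iblock);	/* set mapped, b_bdev, b_blocknr */
	bh->b_size = 1 << inode->i_blkbits;	/* extent covers one block */
	return 0;
}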
In dax_fault() (vmf is a formal argument):
  501   int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
  507   if (vmf->flags & FAULT_FLAG_WRITE) {
  511   result = __dax_fault(vma, vmf, get_block, complete_unwritten);
  512   if (vmf->flags & FAULT_FLAG_WRITE)
In dax_pfn_mkwrite() (vmf is a formal argument):
  707   int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
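For context on how these entry points are consumed, a DAX-capable filesystem points its vm_operations_struct at dax_fault() and dax_pfn_mkwrite() and marks the VMA VM_MIXEDMAP in its mmap method. The sketch below continues the hypothetical myfs from above (myfs_dax_fault, myfs_dax_vm_ops and myfs_file_mmap are made-up names); only dax_fault(), dax_pfn_mkwrite() and the write-fault bracketing visible at lines 507/512 come from the listing itself:

#include <linux/mm.h>	/* vm_operations_struct, VM_MIXEDMAP */

static int myfs_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/*
	 * dax_fault() brackets write faults (the FAULT_FLAG_WRITE checks at
	 * lines 507 and 512) with sb_start_pagefault()/sb_end_pagefault(),
	 * so no extra locking is needed here.  No unwritten-extent callback
	 * is passed: the toy myfs_get_block never returns unwritten buffers
	 * (cf. the WARN_ON_ONCE at line 472).
	 */
	return dax_fault(vma, vmf, myfs_get_block, NULL);
}

static const struct vm_operations_struct myfs_dax_vm_ops = {
	.fault		= myfs_dax_fault,
	.page_mkwrite	= myfs_dax_fault,	/* write fault on a present page */
	.pfn_mkwrite	= dax_pfn_mkwrite,	/* write fault on a present pfn */
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &myfs_dax_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP;	/* DAX installs pfns via vm_insert_mixed() */
	return 0;
}

myfs_file_mmap() would then be wired into the file's file_operations as its .mmap method.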